From b08e2f42e86b5848add254da45b56fc672e2bced Mon Sep 17 00:00:00 2001
From: Steven Price <steven.price@arm.com>
Date: Wed, 2 Oct 2024 15:16:29 +0100
Subject: [PATCH] irqchip/gic-v3-its: Share ITS tables with a non-trusted
 hypervisor

Within a realm guest the ITS is emulated by the host. This means the
allocations must have been made available to the host by a call to
set_memory_decrypted(). Introduce an allocation function which performs
this extra call.

For the ITT use a custom genpool-based allocator that calls
set_memory_decrypted() for each page allocated, but then suballocates the
size needed for each ITT. Note that there is no mechanism implemented to
return pages from the genpool, but it is unlikely that the peak number of
devices will be much larger than the normal level - so this isn't expected
to be an issue.

Co-developed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Will Deacon <will@kernel.org>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/all/20241002141630.433502-2-steven.price@arm.com
---
 drivers/irqchip/irq-gic-v3-its.c | 138 +++++++++++++++++++++++++------
 1 file changed, 115 insertions(+), 23 deletions(-)

--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -12,12 +12,14 @@
 #include <linux/crash_dump.h>
 #include <linux/delay.h>
 #include <linux/efi.h>
+#include <linux/genalloc.h>
 #include <linux/interrupt.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/log2.h>
+#include <linux/mem_encrypt.h>
 #include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/msi.h>
@@ -27,6 +29,7 @@
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
 #include <linux/percpu.h>
+#include <linux/set_memory.h>
 #include <linux/slab.h>
 #include <linux/syscore_ops.h>
 
@@ -163,6 +166,7 @@ struct its_device {
 	struct its_node		*its;
 	struct event_lpi_map	event_map;
 	void			*itt;
+	u32			itt_sz;
 	u32			nr_ites;
 	u32			device_id;
 	bool			shared;
@@ -198,6 +202,87 @@ static DEFINE_IDA(its_vpeid_ida);
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
 
+static struct page *its_alloc_pages_node(int node, gfp_t gfp,
+					 unsigned int order)
+{
+	struct page *page;
+	int ret = 0;
+
+	page = alloc_pages_node(node, gfp, order);
+
+	if (!page)
+		return NULL;
+
+	ret = set_memory_decrypted((unsigned long)page_address(page),
+				   1 << order);
+	/*
+	 * If set_memory_decrypted() fails then we don't know what state the
+	 * page is in, so we can't free it. Instead we leak it.
+	 * set_memory_decrypted() will already have WARNed.
+	 */
+	if (ret)
+		return NULL;
+
+	return page;
+}
+
+static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
+{
+	return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
+}
+
+static void its_free_pages(void *addr, unsigned int order)
+{
+	/*
+	 * If the memory cannot be encrypted again then we must leak the pages.
+	 * set_memory_encrypted() will already have WARNed.
+	 */
+	if (set_memory_encrypted((unsigned long)addr, 1 << order))
+		return;
+	free_pages((unsigned long)addr, order);
+}
+
+static struct gen_pool *itt_pool;
+
+static void *itt_alloc_pool(int node, int size)
+{
+	unsigned long addr;
+	struct page *page;
+
+	if (size >= PAGE_SIZE) {
+		page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
+
+		return page ? page_address(page) : NULL;
+	}
+
+	do {
+		addr = gen_pool_alloc(itt_pool, size);
+		if (addr)
+			break;
+
+		page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 1);
+		if (!page)
+			break;
+
+		gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
+	} while (!addr);
+
+	return (void *)addr;
+}
+
+static void itt_free_pool(void *addr, int size)
+{
+	if (!addr)
+		return;
+
+	if (size >= PAGE_SIZE) {
+		its_free_pages(addr, get_order(size));
+		return;
+	}
+
+	gen_pool_free(itt_pool, (unsigned long)addr, size);
+}
+
 /*
  * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
  * always have vSGIs mapped.
@@ -2192,7 +2277,8 @@ static struct page *its_allocate_prop_ta
 {
 	struct page *prop_page;
 
-	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+	prop_page = its_alloc_pages(gfp_flags,
+				    get_order(LPI_PROPBASE_SZ));
 	if (!prop_page)
 		return NULL;
 
@@ -2203,8 +2289,7 @@ static struct page *its_allocate_prop_ta
 
 static void its_free_prop_table(struct page *prop_page)
 {
-	free_pages((unsigned long)page_address(prop_page),
-		   get_order(LPI_PROPBASE_SZ));
+	its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ));
 }
 
 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
@@ -2326,7 +2411,7 @@ static int its_setup_baser(struct its_no
 		order = get_order(GITS_BASER_PAGES_MAX * psz);
 	}
 
-	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+	page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
 	if (!page)
 		return -ENOMEM;
 
@@ -2339,7 +2424,7 @@ static int its_setup_baser(struct its_no
 		/* 52bit PA is supported only when PageSize=64K */
 		if (psz != SZ_64K) {
 			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
-			free_pages((unsigned long)base, order);
+			its_free_pages(base, order);
 			return -ENXIO;
 		}
 
@@ -2395,7 +2480,7 @@ retry_baser:
 		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
 		       &its->phys_base, its_base_type_string[type],
 		       val, tmp);
-		free_pages((unsigned long)base, order);
+		its_free_pages(base, order);
 		return -ENXIO;
 	}
 
@@ -2534,8 +2619,7 @@ static void its_free_tables(struct its_n
 
 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
 		if (its->tables[i].base) {
-			free_pages((unsigned long)its->tables[i].base,
-				   its->tables[i].order);
+			its_free_pages(its->tables[i].base, its->tables[i].order);
 			its->tables[i].base = NULL;
 		}
 	}
@@ -2801,7 +2885,7 @@ static bool allocate_vpe_l2_table(int cp
 
 	/* Allocate memory for 2nd level table */
 	if (!table[idx]) {
-		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+		page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
 		if (!page)
 			return false;
 
@@ -2920,7 +3004,7 @@ static int allocate_vpe_l1_table(void)
 
 	pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
 		 np, npg, psz, epp, esz);
-	page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
+	page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
 	if (!page)
 		return -ENOMEM;
 
@@ -2966,8 +3050,7 @@ static struct page *its_allocate_pending
 {
 	struct page *pend_page;
 
-	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
-				get_order(LPI_PENDBASE_SZ));
+	pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ));
 	if (!pend_page)
 		return NULL;
 
@@ -2979,7 +3062,7 @@ static struct page *its_allocate_pending
 
 static void its_free_pending_table(struct page *pt)
 {
-	free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+	its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
 }
 
 /*
@@ -3314,8 +3397,8 @@ static bool its_alloc_table_entry(struct
 
 	/* Allocate memory for 2nd level table */
 	if (!table[idx]) {
-		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
-					get_order(baser->psz));
+		page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+					    get_order(baser->psz));
 		if (!page)
 			return false;
 
@@ -3410,7 +3493,6 @@ static struct its_device *its_create_dev
 	if (WARN_ON(!is_power_of_2(nvecs)))
 		nvecs = roundup_pow_of_two(nvecs);
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	/*
 	 * Even if the device wants a single LPI, the ITT must be
 	 * sized as a power of two (and you need at least one bit...).
@@ -3418,7 +3500,11 @@ static struct its_device *its_create_dev
 	nr_ites = max(2, nvecs);
 	sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
-	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+
+	itt = itt_alloc_pool(its->numa_node, sz);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
 	if (alloc_lpis) {
 		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
 		if (lpi_map)
@@ -3430,9 +3516,9 @@ static struct its_device *its_create_dev
 		lpi_base = 0;
 	}
 
-	if (!dev || !itt ||  !col_map || (!lpi_map && alloc_lpis)) {
+	if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
 		kfree(dev);
-		kfree(itt);
+		itt_free_pool(itt, sz);
 		bitmap_free(lpi_map);
 		kfree(col_map);
 		return NULL;
@@ -3442,6 +3528,7 @@ static struct its_device *its_create_dev
 
 	dev->its = its;
 	dev->itt = itt;
+	dev->itt_sz = sz;
 	dev->nr_ites = nr_ites;
 	dev->event_map.lpi_map = lpi_map;
 	dev->event_map.col_map = col_map;
@@ -3469,7 +3556,7 @@ static void its_free_device(struct its_d
 	list_del(&its_dev->entry);
 	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
 	kfree(its_dev->event_map.col_map);
-	kfree(its_dev->itt);
+	itt_free_pool(its_dev->itt, its_dev->itt_sz);
 	kfree(its_dev);
 }
 
@@ -5112,8 +5199,9 @@ static int __init its_probe_one(struct i
 		}
 	}
 
-	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
-				get_order(ITS_CMD_QUEUE_SZ));
+	page = its_alloc_pages_node(its->numa_node,
+				    GFP_KERNEL | __GFP_ZERO,
+				    get_order(ITS_CMD_QUEUE_SZ));
 	if (!page) {
 		err = -ENOMEM;
 		goto out_unmap_sgir;
@@ -5177,7 +5265,7 @@ static int __init its_probe_one(struct i
 out_free_tables:
 	its_free_tables(its);
 out_free_cmd:
-	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+	its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
 out_unmap_sgir:
 	if (its->sgir_base)
 		iounmap(its->sgir_base);
@@ -5659,6 +5747,10 @@ int __init its_init(struct fwnode_handle
 	bool has_v4_1 = false;
 	int err;
 
+	itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
+	if (!itt_pool)
+		return -ENOMEM;
+
 	gic_rdists = rdists;
 
 	its_parent = parent_domain;
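
The ITT side of this change reduces to a small pattern: share whole pages with the host via set_memory_decrypted(), then hand out sub-page chunks from a gen_pool. The sketch below is a minimal, self-contained illustration of that pattern, not code from the patch; the pool variable, the function names and the fixed 256-byte granularity are assumptions made for the example, and requests larger than a page are simply refused here rather than routed to the page allocator as itt_alloc_pool() does.

/*
 * Illustrative sketch only: a page-backed sub-allocator in the style of
 * itt_alloc_pool()/itt_free_pool(). All identifiers are hypothetical.
 */
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

static struct gen_pool *example_pool;

static int example_pool_init(void)
{
	/* 256-byte granularity (ilog2(256) == 8), no NUMA affinity (-1) */
	example_pool = gen_pool_create(8, -1);
	return example_pool ? 0 : -ENOMEM;
}

static void *example_alloc(size_t size)
{
	unsigned long chunk;
	struct page *page;

	/* Oversized or empty requests are out of scope for the sketch. */
	if (!size || size > PAGE_SIZE)
		return NULL;

	/*
	 * Refill the pool one decrypted page at a time until the request
	 * fits; pages are never handed back to the page allocator.
	 */
	while (!(chunk = gen_pool_alloc(example_pool, size))) {
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
		if (!page)
			return NULL;

		/* Share the page with the non-trusted hypervisor. */
		if (set_memory_decrypted((unsigned long)page_address(page), 1))
			return NULL;	/* state unknown: leak the page */

		if (gen_pool_add(example_pool, (unsigned long)page_address(page),
				 PAGE_SIZE, -1))
			return NULL;
	}

	return (void *)chunk;
}

static void example_free(void *addr, size_t size)
{
	if (addr)
		gen_pool_free(example_pool, (unsigned long)addr, size);
}

As in the patch, a page whose set_memory_decrypted() call fails is deliberately leaked, since its encryption state is no longer known and freeing it back to the page allocator would be unsafe.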
|