|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] xen/arm: ffa: Introduce VM to VM support
commit 09a201605f990a09d61d69fbb51c30ef10fd6468
Author: Bertrand Marquis <bertrand.marquis@xxxxxxx>
AuthorDate: Mon Aug 11 15:34:59 2025 +0200
Commit: Julien Grall <jgrall@xxxxxxxxxx>
CommitDate: Tue Aug 12 12:04:41 2025 +0100
xen/arm: ffa: Introduce VM to VM support
Create a CONFIG_FFA_VM_TO_VM parameter to activate FFA communication
between VMs.
When activated, VMs in the system with FF-A support are listed in part_info_get.
When VM to VM is activated, Xen will be tainted as Insecure and a
message is displayed to the user during the boot as there is no
filtering of VMs in FF-A so any VM can communicate or see any other VM
in the system.
WARNING: There is no filtering for now and all VMs are listed !!
This patch reorganizes the ffa_ctx structure to make clear which
lock protects which parts.
This patch introduces a linked list of the ffa_ctx entries with a
negotiated FF-A version, allowing the partinfo results for VMs to be
created in parallel by using an rwlock which only ensures that
addition/removal of entries is protected.
Signed-off-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
Reviewed-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
Acked-by: Julien Grall <jgrall@xxxxxxxxxx>
---
xen/arch/arm/tee/Kconfig | 11 ++++
xen/arch/arm/tee/ffa.c | 49 ++++++++++++++++-
xen/arch/arm/tee/ffa_partinfo.c | 100 ++++++++++++++++++++++++++++++----
xen/arch/arm/tee/ffa_private.h | 117 +++++++++++++++++++++++++++++++++-------
4 files changed, 245 insertions(+), 32 deletions(-)
diff --git a/xen/arch/arm/tee/Kconfig b/xen/arch/arm/tee/Kconfig
index c5b0f88d75..88a4c4c991 100644
--- a/xen/arch/arm/tee/Kconfig
+++ b/xen/arch/arm/tee/Kconfig
@@ -28,5 +28,16 @@ config FFA
[1] https://developer.arm.com/documentation/den0077/latest
+config FFA_VM_TO_VM
+ bool "Enable FF-A between VMs (UNSUPPORTED)" if UNSUPPORTED
+ default n
+ depends on FFA
+ help
+ This option enables the use of FF-A between VMs.
+ This is experimental and there is no access control so any
+ guest can communicate with any other guest.
+
+ If unsure, say N.
+
endmenu
diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
index 3bbdd7168a..0eb6a0bda5 100644
--- a/xen/arch/arm/tee/ffa.c
+++ b/xen/arch/arm/tee/ffa.c
@@ -118,10 +118,18 @@ void *ffa_tx __read_mostly;
DEFINE_SPINLOCK(ffa_rx_buffer_lock);
DEFINE_SPINLOCK(ffa_tx_buffer_lock);
+LIST_HEAD(ffa_ctx_head);
+/* RW Lock to protect addition/removal and reading in ffa_ctx_head */
+DEFINE_RWLOCK(ffa_ctx_list_rwlock);
+
+#ifdef CONFIG_FFA_VM_TO_VM
+atomic_t ffa_vm_count;
+#endif
/* Used to track domains that could not be torn down immediately. */
static struct timer ffa_teardown_timer;
-static struct list_head ffa_teardown_head;
+
+static LIST_HEAD(ffa_teardown_head);
static DEFINE_SPINLOCK(ffa_teardown_lock);
static bool ffa_get_version(uint32_t *vers)
@@ -151,6 +159,7 @@ static void handle_version(struct cpu_user_regs *regs)
struct domain *d = current->domain;
struct ffa_ctx *ctx = d->arch.tee;
uint32_t vers = get_user_reg(regs, 1);
+ uint32_t old_vers;
/*
* Guest will use the version it requested if it is our major and minor
@@ -160,10 +169,23 @@ static void handle_version(struct cpu_user_regs *regs)
*/
if ( FFA_VERSION_MAJOR(vers) == FFA_MY_VERSION_MAJOR )
{
+ spin_lock(&ctx->lock);
+ old_vers = ctx->guest_vers;
+
if ( FFA_VERSION_MINOR(vers) > FFA_MY_VERSION_MINOR )
ctx->guest_vers = FFA_MY_VERSION;
else
ctx->guest_vers = vers;
+ spin_unlock(&ctx->lock);
+
+ if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) && !old_vers )
+ {
+ /* One more VM with FF-A support available */
+ inc_ffa_vm_count();
+ write_lock(&ffa_ctx_list_rwlock);
+ list_add_tail(&ctx->ctx_list, &ffa_ctx_head);
+ write_unlock(&ffa_ctx_list_rwlock);
+ }
}
ffa_set_regs(regs, FFA_MY_VERSION, 0, 0, 0, 0, 0, 0, 0);
}
@@ -345,6 +367,10 @@ static int ffa_domain_init(struct domain *d)
ctx->teardown_d = d;
INIT_LIST_HEAD(&ctx->shm_list);
+ ctx->ffa_id = ffa_get_vm_id(d);
+ ctx->num_vcpus = d->max_vcpus;
+ ctx->is_64bit = is_64bit_domain(d);
+
/*
* ffa_domain_teardown() will be called if ffa_domain_init() returns an
* error, so no need for cleanup in this function.
@@ -421,6 +447,14 @@ static int ffa_domain_teardown(struct domain *d)
if ( !ctx )
return 0;
+ if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) && ctx->guest_vers )
+ {
+ dec_ffa_vm_count();
+ write_lock(&ffa_ctx_list_rwlock);
+ list_del(&ctx->ctx_list);
+ write_unlock(&ffa_ctx_list_rwlock);
+ }
+
ffa_rxtx_domain_destroy(d);
ffa_notif_domain_destroy(d);
@@ -464,6 +498,18 @@ static bool ffa_probe(void)
printk(XENLOG_INFO "ARM FF-A Mediator version %u.%u\n",
FFA_MY_VERSION_MAJOR, FFA_MY_VERSION_MINOR);
+ if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
+ {
+ /*
+ * When FFA VM to VM is enabled, the current implementation does not
+ * offer any way to limit which VM can communicate with which VM using
+ * FF-A.
+ * Signal this in the xen console and taint the system as insecure.
+ * TODO: Introduce a solution to limit what a VM can do through FFA.
+ */
+ printk(XENLOG_ERR "ffa: VM to VM is enabled, system is insecure !!\n");
+ add_taint(TAINT_MACHINE_INSECURE);
+ }
/*
* psci_init_smccc() updates this value with what's reported by EL-3
* or secure world.
@@ -537,7 +583,6 @@ static bool ffa_probe(void)
goto err_rxtx_destroy;
ffa_notif_init();
- INIT_LIST_HEAD(&ffa_teardown_head);
init_timer(&ffa_teardown_timer, ffa_teardown_timer_callback, NULL, 0);
return true;
diff --git a/xen/arch/arm/tee/ffa_partinfo.c b/xen/arch/arm/tee/ffa_partinfo.c
index dfa0b23eaf..fa56b1587e 100644
--- a/xen/arch/arm/tee/ffa_partinfo.c
+++ b/xen/arch/arm/tee/ffa_partinfo.c
@@ -150,6 +150,73 @@ out:
return ret;
}
+static int32_t ffa_get_vm_partinfo(uint32_t *vm_count, void *dst_buf,
+ void *end_buf, uint32_t dst_size)
+{
+ struct ffa_ctx *curr_ctx = current->domain->arch.tee;
+ struct ffa_ctx *dest_ctx;
+ uint32_t count = 0;
+ int32_t ret = FFA_RET_OK;
+
+ /*
+ * There could potentially be a lot of VMs in the system and we could
+ * hold the CPU for long here.
+ * Right now there is no solution in FF-A specification to split
+ * the work in this case.
+ * TODO: Check how we could delay the work or have preemption checks.
+ */
+ read_lock(&ffa_ctx_list_rwlock);
+ list_for_each_entry(dest_ctx, &ffa_ctx_head, ctx_list)
+ {
+ /*
+ * Do not include an entry for the caller VM as the spec is not
+ * clearly mandating it and it is not supported by Linux.
+ */
+ if ( dest_ctx != curr_ctx )
+ {
+ /*
+ * We do not have UUID info for VMs so use
+ * the 1.0 structure so that we set UUIDs to
+ * zero using memset
+ */
+ struct ffa_partition_info_1_0 info;
+
+ if ( dst_buf > (end_buf - dst_size) )
+ {
+ ret = FFA_RET_NO_MEMORY;
+ goto out;
+ }
+
+ /*
+ * Context might have been removed since we got it, or might be
+ * getting removed right now, so we might return information for a
+ * VM that no longer exists. This is acceptable as we return a view
+ * of the system
+ * which could change at any time.
+ */
+ info.id = dest_ctx->ffa_id;
+ info.execution_context = dest_ctx->num_vcpus;
+ info.partition_properties = FFA_PART_VM_PROP;
+ if ( dest_ctx->is_64bit )
+ info.partition_properties |= FFA_PART_PROP_AARCH64_STATE;
+
+ memcpy(dst_buf, &info, MIN(sizeof(info), dst_size));
+
+ if ( dst_size > sizeof(info) )
+ memset(dst_buf + sizeof(info), 0,
+ dst_size - sizeof(info));
+
+ dst_buf += dst_size;
+ count++;
+ }
+ }
+ *vm_count = count;
+
+out:
+ read_unlock(&ffa_ctx_list_rwlock);
+
+ return ret;
+}
+
void ffa_handle_partition_info_get(struct cpu_user_regs *regs)
{
int32_t ret = FFA_RET_OK;
@@ -164,7 +231,7 @@ void ffa_handle_partition_info_get(struct cpu_user_regs
*regs)
};
uint32_t dst_size = 0;
void *dst_buf, *end_buf;
- uint32_t ffa_sp_count = 0;
+ uint32_t ffa_vm_count = 0, ffa_sp_count = 0;
/*
* If the guest is v1.0, he does not get back the entry size so we must
@@ -191,15 +258,18 @@ void ffa_handle_partition_info_get(struct cpu_user_regs
*regs)
}
if ( ffa_fw_supports_fid(FFA_PARTITION_INFO_GET) )
+ {
ret = ffa_get_sp_count(uuid, &ffa_sp_count);
+ if ( ret )
+ goto out;
+ }
- goto out;
- }
+ /*
+ * Do not count the caller VM as the spec is not clearly mandating it
+ * and it is not supported by Linux.
+ */
+ ffa_vm_count = get_ffa_vm_count() - 1;
- if ( !ffa_fw_supports_fid(FFA_PARTITION_INFO_GET) )
- {
- /* Just give an empty partition list to the caller */
- ret = FFA_RET_OK;
goto out;
}
@@ -224,9 +294,19 @@ void ffa_handle_partition_info_get(struct cpu_user_regs
*regs)
goto out_rx_release;
}
- ret = ffa_get_sp_partinfo(uuid, &ffa_sp_count, dst_buf, end_buf,
- dst_size);
+ if ( ffa_fw_supports_fid(FFA_PARTITION_INFO_GET) )
+ {
+ ret = ffa_get_sp_partinfo(uuid, &ffa_sp_count, dst_buf, end_buf,
+ dst_size);
+
+ if ( ret )
+ goto out_rx_release;
+
+ dst_buf += ffa_sp_count * dst_size;
+ }
+ if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
+ ret = ffa_get_vm_partinfo(&ffa_vm_count, dst_buf, end_buf, dst_size);
out_rx_release:
if ( ret )
@@ -235,7 +315,7 @@ out:
if ( ret )
ffa_set_regs_error(regs, ret);
else
- ffa_set_regs_success(regs, ffa_sp_count, dst_size);
+ ffa_set_regs_success(regs, ffa_sp_count + ffa_vm_count, dst_size);
}
static int32_t ffa_direct_req_send_vm(uint16_t sp_id, uint16_t vm_id,
diff --git a/xen/arch/arm/tee/ffa_private.h b/xen/arch/arm/tee/ffa_private.h
index 0a9c1082db..5cb2b51714 100644
--- a/xen/arch/arm/tee/ffa_private.h
+++ b/xen/arch/arm/tee/ffa_private.h
@@ -195,6 +195,18 @@
*/
#define FFA_PARTITION_INFO_GET_COUNT_FLAG BIT(0, U)
+/*
+ * Partition properties we give for a normal world VM:
+ * - can send direct message but not receive them
+ * - can handle indirect messages
+ * - can receive notifications
+ * 32/64 bit flag is set depending on the VM
+ */
+#define FFA_PART_VM_PROP (FFA_PART_PROP_DIRECT_REQ_SEND | \
+ FFA_PART_PROP_INDIRECT_MSGS | \
+ FFA_PART_PROP_RECV_NOTIF | \
+ FFA_PART_PROP_IS_PE_ID)
+
/* Flags used in calls to FFA_NOTIFICATION_GET interface */
#define FFA_NOTIF_FLAG_BITMAP_SP BIT(0, U)
#define FFA_NOTIF_FLAG_BITMAP_VM BIT(1, U)
@@ -297,36 +309,72 @@ struct ffa_ctx_notif {
};
struct ffa_ctx {
- void *rx;
- const void *tx;
- struct page_info *rx_pg;
- struct page_info *tx_pg;
+ /*
+ * Chain list of all FF-A contexts.
+ * As we might have several read from the list of context through parallel
+ * partinfo_get but fewer additions/removal as those happen only during a
+ * version negotiation or guest shutdown, access to this list is protected
+ * through a rwlock (addition/removal with write lock, reading through a
+ * read lock).
+ */
+ struct list_head ctx_list; /* chain list of all FF-A contexts */
+
+ /*
+ * Data access unlocked (mainly for part_info_get in VM to VM).
+ * Those should be set before the ctx is added in the list.
+ */
+ /* FF-A Endpoint ID */
+ uint16_t ffa_id;
+ uint16_t num_vcpus;
+ bool is_64bit;
+
+ /*
+ * Global data accessed atomically or using ACCESS_ONCE.
+ */
+ struct ffa_ctx_notif notif;
+
+ /*
+ * Global data accessed with lock locked.
+ */
+ spinlock_t lock;
+ /*
+ * FF-A version negotiated by the guest, only modifications to
+ * this field are done with the lock held as this is expected to
+ * be done once at init by a guest.
+ */
+ uint32_t guest_vers;
/* Number of 4kB pages in each of rx/rx_pg and tx/tx_pg */
unsigned int page_count;
- /* FF-A version used by the guest */
- uint32_t guest_vers;
- bool rx_is_free;
- /* Used shared memory objects, struct ffa_shm_mem */
- struct list_head shm_list;
/* Number of allocated shared memory object */
unsigned int shm_count;
- struct ffa_ctx_notif notif;
+ /* Used shared memory objects, struct ffa_shm_mem */
+ struct list_head shm_list;
+
/*
- * tx_lock is used to serialize access to tx
- * rx_lock is used to serialize access to rx_is_free
- * lock is used for the rest in this struct
+ * Rx buffer, accessed with rx_lock locked.
+ * rx_is_free is used to serialize access.
*/
- spinlock_t tx_lock;
spinlock_t rx_lock;
- spinlock_t lock;
- /* Used if domain can't be torn down immediately */
+ bool rx_is_free;
+ void *rx;
+ struct page_info *rx_pg;
+
+ /*
+ * Tx buffer, accessed with tx_lock locked.
+ */
+ spinlock_t tx_lock;
+ const void *tx;
+ struct page_info *tx_pg;
+
+
+ /*
+ * Domain teardown handling, used when data shared with or used by
+ * other domains does not allow tearing down the domain immediately.
+ */
struct domain *teardown_d;
struct list_head teardown_list;
s_time_t teardown_expire;
- /*
- * Used for ffa_domain_teardown() to keep track of which SPs should be
- * notified that this guest is being destroyed.
- */
+ /* Keep track of SPs that should be notified of VM destruction */
unsigned long *vm_destroy_bitmap;
};
@@ -336,6 +384,12 @@ extern spinlock_t ffa_rx_buffer_lock;
extern spinlock_t ffa_tx_buffer_lock;
extern DECLARE_BITMAP(ffa_fw_abi_supported, FFA_ABI_BITMAP_SIZE);
+extern struct list_head ffa_ctx_head;
+extern rwlock_t ffa_ctx_list_rwlock;
+#ifdef CONFIG_FFA_VM_TO_VM
+extern atomic_t ffa_vm_count;
+#endif
+
bool ffa_shm_domain_destroy(struct domain *d);
void ffa_handle_mem_share(struct cpu_user_regs *regs);
int ffa_handle_mem_reclaim(uint64_t handle, uint32_t flags);
@@ -368,6 +422,29 @@ int ffa_handle_notification_set(struct cpu_user_regs
*regs);
void ffa_handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid);
int32_t ffa_handle_msg_send2(struct cpu_user_regs *regs);
+#ifdef CONFIG_FFA_VM_TO_VM
+static inline uint16_t get_ffa_vm_count(void)
+{
+ return atomic_read(&ffa_vm_count);
+}
+
+static inline void inc_ffa_vm_count(void)
+{
+ atomic_inc(&ffa_vm_count);
+}
+
+static inline void dec_ffa_vm_count(void)
+{
+ ASSERT(atomic_read(&ffa_vm_count) > 0);
+ atomic_dec(&ffa_vm_count);
+}
+#else
+/* Only count the caller VM */
+#define get_ffa_vm_count() ((uint16_t)1UL)
+#define inc_ffa_vm_count() do {} while(0)
+#define dec_ffa_vm_count() do {} while(0)
+#endif
+
static inline uint16_t ffa_get_vm_id(const struct domain *d)
{
/* +1 since 0 is reserved for the hypervisor in FF-A */
--
generated by git-patchbot for /home/xen/git/xen.git#staging
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |