|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH V2 1/25] VIOMMU: Add vIOMMU helper functions to create, destroy and query capabilities
On Thu, Aug 17, 2017 at 08:22:16PM -0400, Lan Tianyu wrote:
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index b22aacc..d1f9b10 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -396,6 +396,9 @@ struct domain *domain_create(domid_t domid, unsigned int
> domcr_flags,
> spin_unlock(&domlist_update_lock);
> }
>
> + if ( (err = viommu_init_domain(d)) != 0 )
> + goto fail;
> +
Where is the code to destroy viommu during domain destruction?
I suppose you will need a viommu_destroy_domain and call it somewhere in
complete_domain_destroy.
> +
> +#include <xen/sched.h>
> +#include <xen/spinlock.h>
> +#include <xen/types.h>
> +#include <xen/viommu.h>
> +
> +bool __read_mostly opt_viommu;
> +boolean_param("viommu", opt_viommu);
This is missing a corresponding update to the Xen command line documentation (docs/misc/xen-command-line.markdown) describing the new "viommu" option.
> +
> +static spinlock_t type_list_lock;
> +static struct list_head type_list;
> +
> +struct viommu_type {
> + u64 type;
Please use the fixed-width uintXX_t types (uint64_t here) instead of u64, both here and in all other places in this series.
[...]
> +
> +static int viommu_create(struct domain *d, u64 type, u64 base_address,
> + u64 length, u64 caps)
> +{
> + struct viommu_info *info = &d->viommu;
> + struct viommu *viommu;
> + struct viommu_type *viommu_type = NULL;
> + int rc;
> +
> + viommu_type = viommu_get_type(type);
> + if ( !viommu_type )
> + return -EINVAL;
> +
> + if ( info->nr_viommu >= NR_VIOMMU_PER_DOMAIN
Wouldn't -E2BIG be a more appropriate error code for exceeding the per-domain vIOMMU limit than -EINVAL?
> + || !viommu_type->ops || !viommu_type->ops->create )
> + return -EINVAL;
> +
> + viommu = xzalloc(struct viommu);
> + if ( !viommu )
> + return -ENOMEM;
> +
[...]
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 6673b27..98a965a 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -21,6 +21,7 @@
> #include <xen/perfc.h>
> #include <asm/atomic.h>
> #include <xen/wait.h>
> +#include <xen/viommu.h>
> #include <public/xen.h>
> #include <public/domctl.h>
> #include <public/sysctl.h>
> @@ -477,6 +478,7 @@ struct domain
> /* vNUMA topology accesses are protected by rwlock. */
> rwlock_t vnuma_rwlock;
> struct vnuma_info *vnuma;
Please add a blank line here, to separate the new field from the vNUMA members above.
> + struct viommu_info viommu;
>
> /* Common monitor options */
> struct {
> diff --git a/xen/include/xen/viommu.h b/xen/include/xen/viommu.h
> new file mode 100644
> index 0000000..506ea54
> --- /dev/null
> +++ b/xen/include/xen/viommu.h
> @@ -0,0 +1,71 @@
> +/*
> + * include/xen/viommu.h
> + *
> + * Copyright (c) 2017, Intel Corporation
> + * Author: Lan Tianyu <tianyu.lan@xxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along
> with
> + * this program; If not, see <http://www.gnu.org/licenses/>.
> + *
> + */
> +#ifndef __XEN_VIOMMU_H__
> +#define __XEN_VIOMMU_H__
> +
> +#define NR_VIOMMU_PER_DOMAIN 1
> +
> +struct viommu;
> +
> +struct viommu_ops {
> + u64 (*query_caps)(struct domain *d);
> + int (*create)(struct domain *d, struct viommu *viommu);
> + int (*destroy)(struct viommu *viommu);
> +};
> +
> +struct viommu {
> + u64 base_address;
> + u64 length;
> + u64 caps;
> + u32 viommu_id;
> + const struct viommu_ops *ops;
> + void *priv;
> +};
> +
> +struct viommu_info {
> + u32 nr_viommu;
Please use plain unsigned int here instead of u32.
> + struct viommu *viommu[NR_VIOMMU_PER_DOMAIN]; /* viommu array*/
> +};
> +
> +#ifdef CONFIG_VIOMMU
> +extern bool_t opt_viommu;
Please use bool here instead of bool_t, which is being phased out.
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |