Re: [PATCH v4 11/17] xen: mapcache: Make MCACHE_BUCKET_SHIFT runtime configurable
On Tue, 30 Apr 2024, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" <edgar.iglesias@xxxxxxx>
>
> Make MCACHE_BUCKET_SHIFT runtime configurable per cache instance.
>
> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> ---
> hw/xen/xen-mapcache.c | 52 ++++++++++++++++++++++++++-----------------
> 1 file changed, 31 insertions(+), 21 deletions(-)
>
> diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
> index 72a7e25e3e..4f98d284dd 100644
> --- a/hw/xen/xen-mapcache.c
> +++ b/hw/xen/xen-mapcache.c
> @@ -23,13 +23,10 @@
>
>
> #if HOST_LONG_BITS == 32
> -# define MCACHE_BUCKET_SHIFT 16
> # define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
> #else
> -# define MCACHE_BUCKET_SHIFT 20
> # define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */
> #endif
> -#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
>
> /* This is the size of the virtual address space reserve to QEMU that will not
>  * be use by MapCache.
> @@ -65,7 +62,8 @@ typedef struct MapCache {
> /* For most cases (>99.9%), the page address is the same. */
> MapCacheEntry *last_entry;
> unsigned long max_mcache_size;
> - unsigned int mcache_bucket_shift;
> + unsigned int bucket_shift;
> + unsigned long bucket_size;
>
> phys_offset_to_gaddr_t phys_offset_to_gaddr;
> QemuMutex lock;
> @@ -95,6 +93,7 @@ static inline int test_bits(int nr, int size, const unsigned long *addr)
>
> static MapCache *xen_map_cache_init_single(phys_offset_to_gaddr_t f,
> void *opaque,
> + unsigned int bucket_shift,
> unsigned long max_size)
> {
> unsigned long size;
> @@ -108,12 +107,14 @@ static MapCache *xen_map_cache_init_single(phys_offset_to_gaddr_t f,
>
> QTAILQ_INIT(&mc->locked_entries);
>
> + mc->bucket_shift = bucket_shift;
> + mc->bucket_size = 1UL << bucket_shift;
> mc->max_mcache_size = max_size;
>
> mc->nr_buckets =
> (((mc->max_mcache_size >> XC_PAGE_SHIFT) +
> - (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
> - (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
> + (1UL << (bucket_shift - XC_PAGE_SHIFT)) - 1) >>
> + (bucket_shift - XC_PAGE_SHIFT));
>
> size = mc->nr_buckets * sizeof(MapCacheEntry);
> size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
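
As a quick sanity check of the nr_buckets arithmetic with the 64-bit
defaults (bucket_shift = 20, max_mcache_size = 1UL << 35), here is a
standalone sketch of mine, not part of the patch, assuming
XC_PAGE_SHIFT = 12 (4KiB Xen pages):

    #include <stdio.h>

    #define XC_PAGE_SHIFT 12  /* assumed: 4KiB Xen pages */

    int main(void)
    {
        unsigned int bucket_shift = 20;             /* 1MiB buckets (64-bit) */
        unsigned long max_mcache_size = 1UL << 35;  /* 32GB cap */

        /* Same round-up-then-shift as in xen_map_cache_init_single() */
        unsigned long nr_buckets =
            (((max_mcache_size >> XC_PAGE_SHIFT) +
              (1UL << (bucket_shift - XC_PAGE_SHIFT)) - 1) >>
             (bucket_shift - XC_PAGE_SHIFT));

        printf("nr_buckets = %lu\n", nr_buckets);   /* prints 32768 */
        return 0;
    }
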
> @@ -126,6 +127,13 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
> {
> struct rlimit rlimit_as;
> unsigned long max_mcache_size;
> + unsigned int bucket_shift;
> +
> + if (HOST_LONG_BITS == 32) {
> + bucket_shift = 16;
> + } else {
> + bucket_shift = 20;
> + }
>
> if (geteuid() == 0) {
> rlimit_as.rlim_cur = RLIM_INFINITY;
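
(For reference: the if/else above just preserves the old compile-time
defaults that this patch removes, i.e. 1UL << 16 = 64KiB buckets on
32-bit hosts and 1UL << 20 = 1MiB buckets on 64-bit hosts.)
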
> @@ -146,7 +154,9 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
> }
> }
>
> - mapcache = xen_map_cache_init_single(f, opaque, max_mcache_size);
> + mapcache = xen_map_cache_init_single(f, opaque,
> + bucket_shift,
> + max_mcache_size);
> setrlimit(RLIMIT_AS, &rlimit_as);
> }
>
> @@ -195,7 +205,7 @@ static void xen_remap_bucket(MapCache *mc,
> entry->valid_mapping = NULL;
>
> for (i = 0; i < nb_pfn; i++) {
> - pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
> + pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
> }
>
> /*
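
Back-of-the-envelope for the pfn loop above (my arithmetic, assuming
XC_PAGE_SHIFT = 12): with bucket_shift = 20 each bucket covers
1UL << (20 - 12) = 256 frames, so for a one-bucket mapping the loop
fills

    pfns[i] = (address_index << 8) + i,  for i = 0 .. 255
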
> @@ -266,8 +276,8 @@ static uint8_t *xen_map_cache_unlocked(MapCache *mc,
> bool dummy = false;
>
> tryagain:
> - address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
> - address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
> + address_index = phys_addr >> mc->bucket_shift;
> + address_offset = phys_addr & (mc->bucket_size - 1);
>
> trace_xen_map_cache(phys_addr);
>
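
To make the index/offset split concrete (my example numbers, with
mc->bucket_shift = 20):

    phys_addr      = 0x12345678
    address_index  = 0x12345678 >> 20             = 0x123
    address_offset = 0x12345678 & (0x100000 - 1)  = 0x45678
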
> @@ -294,14 +304,14 @@ tryagain:
> return mc->last_entry->vaddr_base + address_offset;
> }
>
> - /* size is always a multiple of MCACHE_BUCKET_SIZE */
> + /* size is always a multiple of mc->bucket_size */
> if (size) {
> cache_size = size + address_offset;
> - if (cache_size % MCACHE_BUCKET_SIZE) {
> - cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
> + if (cache_size % mc->bucket_size) {
> + cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
> }
> } else {
> - cache_size = MCACHE_BUCKET_SIZE;
> + cache_size = mc->bucket_size;
> }
>
> entry = &mc->entry[address_index % mc->nr_buckets];
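
Since bucket_size is always a power of two, the rounding above is the
usual round-up-to-multiple idiom; an equivalent formulation, assuming
QEMU_ALIGN_UP from qemu/osdep.h (just a sketch, nothing the patch needs
to change):

    cache_size = size ? QEMU_ALIGN_UP(size + address_offset, mc->bucket_size)
                      : mc->bucket_size;
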
> @@ -419,7 +429,7 @@ static ram_addr_t xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
> trace_xen_ram_addr_from_mapcache_not_in_cache(ptr);
> raddr = RAM_ADDR_INVALID;
> } else {
> - raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
> + raddr = (reventry->paddr_index << mc->bucket_shift) +
> ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
> }
> mapcache_unlock(mc);
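
This is the inverse of the index/offset split earlier; reusing my
example numbers (illustration only, bucket_shift = 20):

    paddr_index = 0x123, ptr - vaddr_base = 0x45678
    raddr = (0x123 << 20) + 0x45678 = 0x12345678
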
> @@ -582,8 +592,8 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
> hwaddr address_index, address_offset;
> hwaddr test_bit_size, cache_size = size;
>
> - address_index = old_phys_addr >> MCACHE_BUCKET_SHIFT;
> - address_offset = old_phys_addr & (MCACHE_BUCKET_SIZE - 1);
> + address_index = old_phys_addr >> mc->bucket_shift;
> + address_offset = old_phys_addr & (mc->bucket_size - 1);
>
> assert(size);
> /* test_bit_size is always a multiple of XC_PAGE_SIZE */
> @@ -592,8 +602,8 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
> test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
> }
> cache_size = size + address_offset;
> - if (cache_size % MCACHE_BUCKET_SIZE) {
> - cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
> + if (cache_size % mc->bucket_size) {
> + cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
> }
>
> entry = &mc->entry[address_index % mc->nr_buckets];
> @@ -606,8 +616,8 @@ static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
> return NULL;
> }
>
> - address_index = new_phys_addr >> MCACHE_BUCKET_SHIFT;
> - address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1);
> + address_index = new_phys_addr >> mc->bucket_shift;
> + address_offset = new_phys_addr & (mc->bucket_size - 1);
>
> trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);
>
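Not something this patch needs to show, but to illustrate what the new
parameter enables: a second cache instance with page-sized buckets could
now be created next to the default one. A hypothetical sketch (the names
are made up, not from this series):

    /* Hypothetical: a second MapCache with 4KiB buckets. */
    MapCache *small_mc = xen_map_cache_init_single(f, opaque,
                                                   XC_PAGE_SHIFT,
                                                   max_mcache_size);
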
> --
> 2.40.1
>