[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v4 02/14] libxc: Prepare a start info structure for hvmloader
> > diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c > > index bdec40a..9c56d55 100644 > > --- a/tools/libxc/xc_dom_x86.c > > +++ b/tools/libxc/xc_dom_x86.c > > @@ -69,6 +69,9 @@ > > #define round_up(addr, mask) ((addr) | (mask)) > > #define round_pg_up(addr) (((addr) + PAGE_SIZE_X86 - 1) & ~(PAGE_SIZE_X86 > > - 1)) > > > > +#define HVMLOADER_MODULE_MAX_COUNT 1 > > +#define HVMLOADER_MODULE_NAME_SIZE 10 > > + > > struct xc_dom_params { > > unsigned levels; > > xen_vaddr_t vaddr_mask; > > @@ -590,6 +593,7 @@ static int alloc_magic_pages_hvm(struct xc_dom_image > > *dom) > > xen_pfn_t special_array[X86_HVM_NR_SPECIAL_PAGES]; > > xen_pfn_t ioreq_server_array[NR_IOREQ_SERVER_PAGES]; > > xc_interface *xch = dom->xch; > > + size_t start_info_size = sizeof(struct hvm_start_info); > > > > /* Allocate and clear special pages. */ > > for ( i = 0; i < X86_HVM_NR_SPECIAL_PAGES; i++ ) > > @@ -624,8 +628,6 @@ static int alloc_magic_pages_hvm(struct xc_dom_image > > *dom) > > > > if ( !dom->device_model ) > > { > > - size_t start_info_size = sizeof(struct hvm_start_info); > > - > > if ( dom->cmdline ) > > { > > dom->cmdline_size = ROUNDUP(strlen(dom->cmdline) + 1, 8); > > @@ -635,17 +637,26 @@ static int alloc_magic_pages_hvm(struct xc_dom_image > > *dom) > > /* Limited to one module. */ > > if ( dom->ramdisk_blob ) > > start_info_size += sizeof(struct hvm_modlist_entry); > > - > > - rc = xc_dom_alloc_segment(dom, &dom->start_info_seg, > > - "HVMlite start info", 0, > > start_info_size); > > - if ( rc != 0 ) > > - { > > - DOMPRINTF("Unable to reserve memory for the start info"); > > - goto out; > > - } > > } > > else > > { > > + start_info_size += > > + sizeof(struct hvm_modlist_entry) * HVMLOADER_MODULE_MAX_COUNT; > > + /* Add extra space to write modules name */ > > + start_info_size += > > + HVMLOADER_MODULE_NAME_SIZE * HVMLOADER_MODULE_MAX_COUNT; > > What about \0 ? Ah, the strncpy we use adds \0 byte. But it would be nice > to mention that somewhere. 
Perhaps mention: > > The HVMLOADER_MODULE_NAME_SIZE accounts for NUL byte? Yes, I can add a comment about it. > > + } > > + > > + rc = xc_dom_alloc_segment(dom, &dom->start_info_seg, > > + "HVMlite start info", 0, start_info_size); > > + if ( rc != 0 ) > > + { > > + DOMPRINTF("Unable to reserve memory for the start info"); > > + goto out; > > + } > > + > > + if ( dom->device_model ) > > + { > > /* > > * Allocate and clear additional ioreq server pages. The default > > * server will use the IOREQ and BUFIOREQ special pages above. > > @@ -1696,39 +1707,68 @@ static int alloc_pgtables_hvm(struct xc_dom_image > > *dom) > > return 0; > > } > > > > +static void add_module_to_list(struct xc_dom_image *dom, > > + struct xc_hvm_firmware_module *module, > > + const char *name, > > + struct hvm_modlist_entry *modlist, > > + struct hvm_start_info *start_info) > > +{ > > + uint32_t index = start_info->nr_modules; > > + if ( module->length == 0 ) > > + return; > > + > > + assert(start_info->nr_modules < HVMLOADER_MODULE_MAX_COUNT); > > + assert(strnlen(name, HVMLOADER_MODULE_NAME_SIZE) > > + < HVMLOADER_MODULE_NAME_SIZE); > > + > > + modlist[index].paddr = module->guest_addr_out; > > + modlist[index].size = module->length; > > + strncpy((char*)(modlist + HVMLOADER_MODULE_MAX_COUNT) > > + + HVMLOADER_MODULE_NAME_SIZE * index, > > + name, HVMLOADER_MODULE_NAME_SIZE); > > + modlist[index].cmdline_paddr = > > + (dom->start_info_seg.pfn << PAGE_SHIFT) + > > + ((uintptr_t)modlist - (uintptr_t)start_info) + > > + sizeof(struct hvm_modlist_entry) * HVMLOADER_MODULE_MAX_COUNT + > > + HVMLOADER_MODULE_NAME_SIZE * index; > > That looks right, but boy it takes a bit of thinking to > make sure it is right. Perhaps put a comment outlining where > it ought to be (so folks reading the first time can feel > OK they got it right?) Yes, I can add comments, and maybe try to simplify it a bit. 
-- Anthony PERARD _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.