[Xen-changelog] [xen-unstable] domctl/sysctl: Clean up definitions
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261481595 0
# Node ID bde24590c13a65a55b3bb240d47765cfd935fbc6
# Parent  3c93fffa650221e7f86d23337f5b5f2094e54e2f
domctl/sysctl: Clean up definitions
 - Use fixed-width types only
 - Use named unions only
 - Bump domctl version number

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 tools/libxc/xc_memshr.c       |   20 +++----
 tools/libxc/xc_pm.c           |   30 +++++------
 xen/arch/x86/mm/mem_sharing.c |   20 +++----
 xen/drivers/acpi/pmstat.c     |  108 +++++++++++++++++++++---------------------
 xen/include/public/domctl.h   |   64 ++++++++++++------------
 xen/include/public/sysctl.h   |    2
 6 files changed, 123 insertions(+), 121 deletions(-)
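For readers skimming the diff below, the mechanical effect of the named-union change is that every access through a previously anonymous union gains an explicit '.u' component (op->enable becomes op->u.enable, and the nested nominate/debug unions become op->u.nominate.u.gfn and so on), while the switch to fixed-width types keeps the structure layout the same for 32-bit and 64-bit tool stacks. A minimal stand-alone sketch of that pattern, assuming nothing beyond standard C11 -- the struct and field names here are illustrative only, not the actual Xen headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Old style: an anonymous union, so members are referenced directly.
     * Anonymous unions are a C11/GNU feature and awkward for strict ABIs. */
    struct demo_op_old {
        uint8_t op;
        union {
            uint8_t  enable;   /* used when op selects the "control" case  */
            uint64_t handle;   /* used when op selects the "nominate" case */
        };
    };

    /* New style, as in this changeset: the union is named 'u', so callers
     * spell out '.u', and only fixed-width types appear in the layout. */
    struct demo_op_new {
        uint8_t op;
        union {
            uint8_t  enable;
            uint64_t handle;
        } u;
    };

    int main(void)
    {
        struct demo_op_old old_op = { .op = 0, .enable = 1 };   /* direct access */
        struct demo_op_new new_op = { .op = 0, .u.enable = 1 }; /* through '.u'  */

        printf("old: %u, new: %u\n",
               (unsigned)old_op.enable, (unsigned)new_op.u.enable);
        return 0;
    }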
diff -r 3c93fffa6502 -r bde24590c13a tools/libxc/xc_memshr.c
--- a/tools/libxc/xc_memshr.c   Mon Dec 21 16:51:40 2009 +0000
+++ b/tools/libxc/xc_memshr.c   Tue Dec 22 11:33:15 2009 +0000
@@ -37,7 +37,7 @@ int xc_memshr_control(int xc_handle,
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
     op->op = XEN_DOMCTL_MEM_SHARING_OP_CONTROL;
-    op->enable = enable;
+    op->u.enable = enable;

     return do_domctl(xc_handle, &domctl);
 }
@@ -56,10 +56,10 @@ int xc_memshr_nominate_gfn(int xc_handle
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
     op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN;
-    op->nominate.gfn = gfn;
+    op->u.nominate.u.gfn = gfn;

     ret = do_domctl(xc_handle, &domctl);
-    if(!ret) *handle = op->nominate.handle;
+    if(!ret) *handle = op->u.nominate.handle;

     return ret;
 }
@@ -78,10 +78,10 @@ int xc_memshr_nominate_gref(int xc_handl
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
     op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF;
-    op->nominate.grant_ref = gref;
+    op->u.nominate.u.grant_ref = gref;

     ret = do_domctl(xc_handle, &domctl);
-    if(!ret) *handle = op->nominate.handle;
+    if(!ret) *handle = op->u.nominate.handle;

     return ret;
 }
@@ -98,8 +98,8 @@ int xc_memshr_share(int xc_handle,
     domctl.domain = 0;
     op = &(domctl.u.mem_sharing_op);
     op->op = XEN_DOMCTL_MEM_SHARING_OP_SHARE;
-    op->share.source_handle = source_handle;
-    op->share.client_handle = client_handle;
+    op->u.share.source_handle = source_handle;
+    op->u.share.client_handle = client_handle;

     return do_domctl(xc_handle, &domctl);
 }
@@ -131,7 +131,7 @@ int xc_memshr_debug_gfn(int xc_handle,
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
     op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN;
-    op->debug.gfn = gfn;
+    op->u.debug.u.gfn = gfn;

     return do_domctl(xc_handle, &domctl);
 }
@@ -148,7 +148,7 @@ int xc_memshr_debug_mfn(int xc_handle,
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
     op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN;
-    op->debug.mfn = mfn;
+    op->u.debug.u.mfn = mfn;

     return do_domctl(xc_handle, &domctl);
 }
@@ -165,7 +165,7 @@ int xc_memshr_debug_gref(int xc_handle,
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
     op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF;
-    op->debug.gref = gref;
+    op->u.debug.u.gref = gref;

     return do_domctl(xc_handle, &domctl);
 }
diff -r 3c93fffa6502 -r bde24590c13a tools/libxc/xc_pm.c
--- a/tools/libxc/xc_pm.c       Mon Dec 21 16:51:40 2009 +0000
+++ b/tools/libxc/xc_pm.c       Tue Dec 22 11:33:15 2009 +0000
@@ -184,7 +184,7 @@ int xc_get_cpufreq_para(int xc_handle, i
 {
     DECLARE_SYSCTL;
     int ret = 0;
-    struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.get_para;
+    struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
     bool has_num = user_para->cpu_num &&
                    user_para->freq_num &&
                    user_para->gov_num;
@@ -276,7 +276,7 @@ int xc_set_cpufreq_gov(int xc_handle, in
 int xc_set_cpufreq_gov(int xc_handle, int cpuid, char *govname)
 {
     DECLARE_SYSCTL;
-    char *scaling_governor = sysctl.u.pm_op.set_gov.scaling_governor;
+    char *scaling_governor = sysctl.u.pm_op.u.set_gov.scaling_governor;

     if ( (xc_handle < 0) || (!govname) )
         return -EINVAL;
@@ -301,8 +301,8 @@ int xc_set_cpufreq_para(int xc_handle, i
     sysctl.cmd = XEN_SYSCTL_pm_op;
     sysctl.u.pm_op.cmd = SET_CPUFREQ_PARA;
     sysctl.u.pm_op.cpuid = cpuid;
-    sysctl.u.pm_op.set_para.ctrl_type = ctrl_type;
-    sysctl.u.pm_op.set_para.ctrl_value = ctrl_value;
+    sysctl.u.pm_op.u.set_para.ctrl_type = ctrl_type;
+    sysctl.u.pm_op.u.set_para.ctrl_value = ctrl_value;

     return xc_sysctl(xc_handle, &sysctl);
 }
@@ -320,7 +320,7 @@ int xc_get_cpufreq_avgfreq(int xc_handle
     sysctl.u.pm_op.cpuid = cpuid;
     ret = xc_sysctl(xc_handle, &sysctl);

-    *avg_freq = sysctl.u.pm_op.get_avgfreq;
+    *avg_freq = sysctl.u.pm_op.u.get_avgfreq;

     return ret;
 }
@@ -333,14 +333,14 @@ int xc_get_cputopo(int xc_handle, struct
     sysctl.cmd = XEN_SYSCTL_pm_op;
     sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_get_cputopo;
     sysctl.u.pm_op.cpuid = 0;
-    set_xen_guest_handle( sysctl.u.pm_op.get_topo.cpu_to_core,
+    set_xen_guest_handle( sysctl.u.pm_op.u.get_topo.cpu_to_core,
                          info->cpu_to_core );
-    set_xen_guest_handle( sysctl.u.pm_op.get_topo.cpu_to_socket,
+    set_xen_guest_handle( sysctl.u.pm_op.u.get_topo.cpu_to_socket,
                          info->cpu_to_socket );
-    sysctl.u.pm_op.get_topo.max_cpus = info->max_cpus;
+    sysctl.u.pm_op.u.get_topo.max_cpus = info->max_cpus;

     rc = do_sysctl(xc_handle, &sysctl);
-    info->nr_cpus = sysctl.u.pm_op.get_topo.nr_cpus;
+    info->nr_cpus = sysctl.u.pm_op.u.get_topo.nr_cpus;

     return rc;
 }
@@ -356,7 +356,7 @@ int xc_set_sched_opt_smt(int xc_handle,
     sysctl.cmd = XEN_SYSCTL_pm_op;
     sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_sched_opt_smt;
     sysctl.u.pm_op.cpuid = 0;
-    sysctl.u.pm_op.set_sched_opt_smt = value;
+    sysctl.u.pm_op.u.set_sched_opt_smt = value;
     rc = do_sysctl(xc_handle, &sysctl);

     return rc;
@@ -370,7 +370,7 @@ int xc_set_vcpu_migration_delay(int xc_h
     sysctl.cmd = XEN_SYSCTL_pm_op;
     sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_vcpu_migration_delay;
     sysctl.u.pm_op.cpuid = 0;
-    sysctl.u.pm_op.set_vcpu_migration_delay = value;
+    sysctl.u.pm_op.u.set_vcpu_migration_delay = value;
     rc = do_sysctl(xc_handle, &sysctl);

     return rc;
@@ -387,7 +387,7 @@ int xc_get_vcpu_migration_delay(int xc_h
     rc = do_sysctl(xc_handle, &sysctl);

     if (!rc && value)
-        *value = sysctl.u.pm_op.get_vcpu_migration_delay;
+        *value = sysctl.u.pm_op.u.get_vcpu_migration_delay;

     return rc;
 }
@@ -403,9 +403,9 @@ int xc_get_cpuidle_max_cstate(int xc_han
     sysctl.cmd = XEN_SYSCTL_pm_op;
     sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_get_max_cstate;
     sysctl.u.pm_op.cpuid = 0;
-    sysctl.u.pm_op.get_max_cstate = 0;
+    sysctl.u.pm_op.u.get_max_cstate = 0;
     rc = do_sysctl(xc_handle, &sysctl);
-    *value = sysctl.u.pm_op.get_max_cstate;
+    *value = sysctl.u.pm_op.u.get_max_cstate;

     return rc;
 }
@@ -420,7 +420,7 @@ int xc_set_cpuidle_max_cstate(int xc_han
     sysctl.cmd = XEN_SYSCTL_pm_op;
     sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_max_cstate;
     sysctl.u.pm_op.cpuid = 0;
-    sysctl.u.pm_op.set_max_cstate = value;
+    sysctl.u.pm_op.u.set_max_cstate = value;

     return do_sysctl(xc_handle, &sysctl);
 }
diff -r 3c93fffa6502 -r bde24590c13a xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Mon Dec 21 16:51:40 2009 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c     Tue Dec 22 11:33:15 2009 +0000
@@ -734,7 +734,7 @@ int mem_sharing_domctl(struct domain *d,
             rc = 0;
             if(!hap_enabled(d))
                 return -EINVAL;
-            d->arch.hvm_domain.mem_sharing_enabled = mec->enable;
+            d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
             mem_sharing_audit();
             return 0;
         }
@@ -742,19 +742,19 @@ int mem_sharing_domctl(struct domain *d,

         case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN:
         {
-            unsigned long gfn = mec->nominate.gfn;
+            unsigned long gfn = mec->u.nominate.u.gfn;
             shr_handle_t handle;
             if(!mem_sharing_enabled(d))
                 return -EINVAL;
             rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
-            mec->nominate.handle = handle;
+            mec->u.nominate.handle = handle;
             mem_sharing_audit();
         }
         break;

         case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF:
         {
-            grant_ref_t gref = mec->nominate.grant_ref;
+            grant_ref_t gref = mec->u.nominate.u.grant_ref;
             unsigned long gfn;
             shr_handle_t handle;
@@ -763,15 +763,15 @@ int mem_sharing_domctl(struct domain *d,
             if(mem_sharing_gref_to_gfn(d, gref, &gfn) < 0)
                 return -EINVAL;
             rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
-            mec->nominate.handle = handle;
+            mec->u.nominate.handle = handle;
             mem_sharing_audit();
         }
         break;

         case XEN_DOMCTL_MEM_SHARING_OP_SHARE:
         {
-            shr_handle_t sh = mec->share.source_handle;
-            shr_handle_t ch = mec->share.client_handle;
+            shr_handle_t sh = mec->u.share.source_handle;
+            shr_handle_t ch = mec->u.share.client_handle;
             rc = mem_sharing_share_pages(sh, ch);
             mem_sharing_audit();
         }
@@ -788,7 +788,7 @@ int mem_sharing_domctl(struct domain *d,

         case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN:
         {
-            unsigned long gfn = mec->debug.gfn;
+            unsigned long gfn = mec->u.debug.u.gfn;
             rc = mem_sharing_debug_gfn(d, gfn);
             mem_sharing_audit();
         }
@@ -796,7 +796,7 @@ int mem_sharing_domctl(struct domain *d,

         case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN:
         {
-            unsigned long mfn = mec->debug.mfn;
+            unsigned long mfn = mec->u.debug.u.mfn;
             rc = mem_sharing_debug_mfn(mfn);
             mem_sharing_audit();
         }
@@ -804,7 +804,7 @@ int mem_sharing_domctl(struct domain *d,

         case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF:
         {
-            grant_ref_t gref = mec->debug.gref;
+            grant_ref_t gref = mec->u.debug.u.gref;
             rc = mem_sharing_debug_gref(d, gref);
             mem_sharing_audit();
         }
diff -r 3c93fffa6502 -r bde24590c13a xen/drivers/acpi/pmstat.c
--- a/xen/drivers/acpi/pmstat.c Mon Dec 21 16:51:40 2009 +0000
+++ b/xen/drivers/acpi/pmstat.c Tue Dec 22 11:33:15 2009 +0000
@@ -212,37 +212,37 @@ static int get_cpufreq_para(struct xen_s
     list_for_each(pos, &cpufreq_governor_list)
         gov_num++;

-    if ( (op->get_para.cpu_num != cpus_weight(policy->cpus)) ||
-         (op->get_para.freq_num != pmpt->perf.state_count)   ||
-         (op->get_para.gov_num != gov_num) )
-    {
-        op->get_para.cpu_num = cpus_weight(policy->cpus);
-        op->get_para.freq_num = pmpt->perf.state_count;
-        op->get_para.gov_num = gov_num;
+    if ( (op->u.get_para.cpu_num != cpus_weight(policy->cpus)) ||
+         (op->u.get_para.freq_num != pmpt->perf.state_count)   ||
+         (op->u.get_para.gov_num != gov_num) )
+    {
+        op->u.get_para.cpu_num = cpus_weight(policy->cpus);
+        op->u.get_para.freq_num = pmpt->perf.state_count;
+        op->u.get_para.gov_num = gov_num;
         return -EAGAIN;
     }

-    if ( !(affected_cpus = xmalloc_array(uint32_t, op->get_para.cpu_num)) )
+    if ( !(affected_cpus = xmalloc_array(uint32_t, op->u.get_para.cpu_num)) )
         return -ENOMEM;
-    memset(affected_cpus, 0, op->get_para.cpu_num * sizeof(uint32_t));
+    memset(affected_cpus, 0, op->u.get_para.cpu_num * sizeof(uint32_t));
     for_each_cpu_mask(cpu, policy->cpus)
         affected_cpus[j++] = cpu;
-    ret = copy_to_guest(op->get_para.affected_cpus,
-                        affected_cpus, op->get_para.cpu_num);
+    ret = copy_to_guest(op->u.get_para.affected_cpus,
+                        affected_cpus, op->u.get_para.cpu_num);
     xfree(affected_cpus);
     if ( ret )
         return ret;

     if ( !(scaling_available_frequencies =
-        xmalloc_array(uint32_t, op->get_para.freq_num)) )
+        xmalloc_array(uint32_t, op->u.get_para.freq_num)) )
         return -ENOMEM;
     memset(scaling_available_frequencies, 0,
-           op->get_para.freq_num * sizeof(uint32_t));
-    for ( i = 0; i < op->get_para.freq_num; i++ )
+           op->u.get_para.freq_num * sizeof(uint32_t));
+    for ( i = 0; i < op->u.get_para.freq_num; i++ )
         scaling_available_frequencies[i] =
                         pmpt->perf.states[i].core_frequency * 1000;
-    ret = copy_to_guest(op->get_para.scaling_available_frequencies,
-                        scaling_available_frequencies, op->get_para.freq_num);
+    ret = copy_to_guest(op->u.get_para.scaling_available_frequencies,
+                        scaling_available_frequencies, op->u.get_para.freq_num);
     xfree(scaling_available_frequencies);
     if ( ret )
         return ret;
@@ -258,47 +258,47 @@ static int get_cpufreq_para(struct xen_s
         xfree(scaling_available_governors);
         return ret;
     }
-    ret = copy_to_guest(op->get_para.scaling_available_governors,
+    ret = copy_to_guest(op->u.get_para.scaling_available_governors,
                 scaling_available_governors, gov_num * CPUFREQ_NAME_LEN);
     xfree(scaling_available_governors);
     if ( ret )
         return ret;

-    op->get_para.cpuinfo_cur_freq =
+    op->u.get_para.cpuinfo_cur_freq =
         cpufreq_driver->get ? cpufreq_driver->get(op->cpuid) : policy->cur;
-    op->get_para.cpuinfo_max_freq = policy->cpuinfo.max_freq;
-    op->get_para.cpuinfo_min_freq = policy->cpuinfo.min_freq;
-    op->get_para.scaling_cur_freq = policy->cur;
-    op->get_para.scaling_max_freq = policy->max;
-    op->get_para.scaling_min_freq = policy->min;
+    op->u.get_para.cpuinfo_max_freq = policy->cpuinfo.max_freq;
+    op->u.get_para.cpuinfo_min_freq = policy->cpuinfo.min_freq;
+    op->u.get_para.scaling_cur_freq = policy->cur;
+    op->u.get_para.scaling_max_freq = policy->max;
+    op->u.get_para.scaling_min_freq = policy->min;

     if ( cpufreq_driver->name )
-        strlcpy(op->get_para.scaling_driver,
+        strlcpy(op->u.get_para.scaling_driver,
             cpufreq_driver->name, CPUFREQ_NAME_LEN);
     else
-        strlcpy(op->get_para.scaling_driver, "Unknown", CPUFREQ_NAME_LEN);
+        strlcpy(op->u.get_para.scaling_driver, "Unknown", CPUFREQ_NAME_LEN);

     if ( policy->governor->name )
-        strlcpy(op->get_para.scaling_governor,
+        strlcpy(op->u.get_para.scaling_governor,
            policy->governor->name, CPUFREQ_NAME_LEN);
     else
-        strlcpy(op->get_para.scaling_governor, "Unknown", CPUFREQ_NAME_LEN);
+        strlcpy(op->u.get_para.scaling_governor, "Unknown", CPUFREQ_NAME_LEN);

     /* governor specific para */
-    if ( !strnicmp(op->get_para.scaling_governor,
+    if ( !strnicmp(op->u.get_para.scaling_governor,
                    "userspace", CPUFREQ_NAME_LEN) )
     {
-        op->get_para.u.userspace.scaling_setspeed = policy->cur;
-    }
-
-    if ( !strnicmp(op->get_para.scaling_governor,
+        op->u.get_para.u.userspace.scaling_setspeed = policy->cur;
+    }
+
+    if ( !strnicmp(op->u.get_para.scaling_governor,
                    "ondemand", CPUFREQ_NAME_LEN) )
     {
         ret = get_cpufreq_ondemand_para(
-            &op->get_para.u.ondemand.sampling_rate_max,
-            &op->get_para.u.ondemand.sampling_rate_min,
-            &op->get_para.u.ondemand.sampling_rate,
-            &op->get_para.u.ondemand.up_threshold);
+            &op->u.get_para.u.ondemand.sampling_rate_max,
+            &op->u.get_para.u.ondemand.sampling_rate_min,
+            &op->u.get_para.u.ondemand.sampling_rate,
+            &op->u.get_para.u.ondemand.up_threshold);
     }

     return ret;
@@ -317,7 +317,7 @@ static int set_cpufreq_gov(struct xen_sy

     memcpy(&new_policy, old_policy, sizeof(struct cpufreq_policy));

-    new_policy.governor = __find_governor(op->set_gov.scaling_governor);
+    new_policy.governor = __find_governor(op->u.set_gov.scaling_governor);
     if (new_policy.governor == NULL)
         return -EINVAL;

@@ -336,14 +336,14 @@ static int set_cpufreq_para(struct xen_s
     if ( !policy || !policy->governor )
         return -EINVAL;

-    switch(op->set_para.ctrl_type)
+    switch(op->u.set_para.ctrl_type)
     {
     case SCALING_MAX_FREQ:
     {
         struct cpufreq_policy new_policy;

         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
-        new_policy.max = op->set_para.ctrl_value;
+        new_policy.max = op->u.set_para.ctrl_value;
         ret = __cpufreq_set_policy(policy, &new_policy);

         break;
@@ -354,7 +354,7 @@ static int set_cpufreq_para(struct xen_s
         struct cpufreq_policy new_policy;

         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
-        new_policy.min = op->set_para.ctrl_value;
+        new_policy.min = op->u.set_para.ctrl_value;
         ret = __cpufreq_set_policy(policy, &new_policy);

         break;
@@ -362,7 +362,7 @@ static int set_cpufreq_para(struct xen_s

     case SCALING_SETSPEED:
     {
-        unsigned int freq =op->set_para.ctrl_value;
+        unsigned int freq =op->u.set_para.ctrl_value;

         if ( !strnicmp(policy->governor->name,
                        "userspace", CPUFREQ_NAME_LEN) )
@@ -375,7 +375,7 @@ static int set_cpufreq_para(struct xen_s

     case SAMPLING_RATE:
     {
-        unsigned int sampling_rate = op->set_para.ctrl_value;
+        unsigned int sampling_rate = op->u.set_para.ctrl_value;

         if ( !strnicmp(policy->governor->name,
                        "ondemand", CPUFREQ_NAME_LEN) )
@@ -388,7 +388,7 @@ static int set_cpufreq_para(struct xen_s

     case UP_THRESHOLD:
     {
-        unsigned int up_threshold = op->set_para.ctrl_value;
+        unsigned int up_threshold = op->u.set_para.ctrl_value;

         if ( !strnicmp(policy->governor->name,
                        "ondemand", CPUFREQ_NAME_LEN) )
@@ -412,7 +412,7 @@ static int get_cpufreq_avgfreq(struct xe
     if ( !op || !cpu_online(op->cpuid) )
         return -EINVAL;

-    op->get_avgfreq = cpufreq_driver_getavg(op->cpuid, USR_GETAVG);
+    op->u.get_avgfreq = cpufreq_driver_getavg(op->cpuid, USR_GETAVG);

     return 0;
 }
@@ -424,9 +424,9 @@ static int get_cputopo (struct xen_sysct
     XEN_GUEST_HANDLE_64(uint32) cpu_to_socket_arr;
     int arr_size, ret=0;

-    cpu_to_core_arr = op->get_topo.cpu_to_core;
-    cpu_to_socket_arr = op->get_topo.cpu_to_socket;
-    arr_size= min_t(uint32_t, op->get_topo.max_cpus, NR_CPUS);
+    cpu_to_core_arr = op->u.get_topo.cpu_to_core;
+    cpu_to_socket_arr = op->u.get_topo.cpu_to_socket;
+    arr_size= min_t(uint32_t, op->u.get_topo.max_cpus, NR_CPUS);

     if ( guest_handle_is_null( cpu_to_core_arr ) ||
          guest_handle_is_null( cpu_to_socket_arr) )
@@ -458,7 +458,7 @@ static int get_cputopo (struct xen_sysct
         }
     }

-    op->get_topo.nr_cpus = nr_cpus + 1;
+    op->u.get_topo.nr_cpus = nr_cpus + 1;
 out:
     return ret;
 }
@@ -519,33 +519,33 @@ int do_pm_op(struct xen_sysctl_pm_op *op
         uint32_t saved_value;

         saved_value = sched_smt_power_savings;
-        sched_smt_power_savings = !!op->set_sched_opt_smt;
-        op->set_sched_opt_smt = saved_value;
+        sched_smt_power_savings = !!op->u.set_sched_opt_smt;
+        op->u.set_sched_opt_smt = saved_value;

         break;
     }

     case XEN_SYSCTL_pm_op_set_vcpu_migration_delay:
     {
-        set_vcpu_migration_delay(op->set_vcpu_migration_delay);
+        set_vcpu_migration_delay(op->u.set_vcpu_migration_delay);
         break;
     }

     case XEN_SYSCTL_pm_op_get_vcpu_migration_delay:
     {
-        op->get_vcpu_migration_delay = get_vcpu_migration_delay();
+        op->u.get_vcpu_migration_delay = get_vcpu_migration_delay();
         break;
     }

     case XEN_SYSCTL_pm_op_get_max_cstate:
     {
-        op->get_max_cstate = acpi_get_cstate_limit();
+        op->u.get_max_cstate = acpi_get_cstate_limit();
         break;
     }

     case XEN_SYSCTL_pm_op_set_max_cstate:
     {
-        acpi_set_cstate_limit(op->set_max_cstate);
+        acpi_set_cstate_limit(op->u.set_max_cstate);
         break;
     }
diff -r 3c93fffa6502 -r bde24590c13a xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Mon Dec 21 16:51:40 2009 +0000
+++ b/xen/include/public/domctl.h       Tue Dec 22 11:33:15 2009 +0000
@@ -35,7 +35,7 @@
 #include "xen.h"
 #include "grant_table.h"

-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000006

 struct xenctl_cpumap {
     XEN_GUEST_HANDLE_64(uint8) bitmap;
@@ -539,7 +539,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_iopor
 #define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
 struct xen_domctl_pin_mem_cacheattr {
     uint64_aligned_t start, end;
-    unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
+    uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
 };
 typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
@@ -598,11 +598,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_t
 # define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
 # define XEN_DOMCTL_set_cpuid 49
 struct xen_domctl_cpuid {
-  unsigned int input[2];
-  unsigned int eax;
-  unsigned int ebx;
-  unsigned int ecx;
-  unsigned int edx;
+  uint32_t input[2];
+  uint32_t eax;
+  uint32_t ebx;
+  uint32_t ecx;
+  uint32_t edx;
 };
 typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
@@ -661,7 +661,7 @@ struct xen_guest_tsc_info {
     uint32_t gtsc_khz;
     uint32_t incarnation;
     uint32_t pad;
-    uint64_t elapsed_nsec;
+    uint64_aligned_t elapsed_nsec;
 };
 typedef struct xen_guest_tsc_info xen_guest_tsc_info_t;
 DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t);
@@ -672,12 +672,14 @@ typedef struct xen_domctl_tsc_info {

 #define XEN_DOMCTL_gdbsx_guestmemio 1000 /* guest mem io */
 struct xen_domctl_gdbsx_memio {
+    /* IN */
     uint64_aligned_t pgd3val;/* optional: init_mm.pgd[3] value */
     uint64_aligned_t gva;    /* guest virtual address */
     uint64_aligned_t uva;    /* user buffer virtual address */
-    int              len;    /* number of bytes to read/write */
-    int              gwr;    /* 0 = read from guest. 1 = write to guest */
-    int              remain; /* bytes remaining to be copied */
+    uint32_t         len;    /* number of bytes to read/write */
+    uint8_t          gwr;    /* 0 = read from guest. 1 = write to guest */
+    /* OUT */
+    uint32_t         remain; /* bytes remaining to be copied */
 };

 #define XEN_DOMCTL_gdbsx_pausevcpu 1001
@@ -688,10 +690,10 @@ struct xen_domctl_gdbsx_pauseunp_vcpu {

 #define XEN_DOMCTL_gdbsx_domstatus 1003
 struct xen_domctl_gdbsx_domstatus {
-    int              paused;  /* is the domain paused */
+    /* OUT */
+    uint8_t          paused;  /* is the domain paused */
     uint32_t         vcpu_id; /* any vcpu in an event? */
     uint32_t         vcpu_ev; /* if yes, what event? */
-
 };

 /*
@@ -720,11 +722,11 @@ struct xen_domctl_mem_event_op {
     uint32_t mode;           /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */

     /* OP_ENABLE */
-    unsigned long shared_addr;    /* IN: Virtual address of shared page */
-    unsigned long ring_addr;      /* IN: Virtual address of ring page */
+    uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */
+    uint64_aligned_t ring_addr;   /* IN: Virtual address of ring page */

     /* Other OPs */
-    unsigned long gfn;            /* IN: gfn of page being operated on */
+    uint64_aligned_t gfn;         /* IN: gfn of page being operated on */
 };
 typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
@@ -747,30 +749,30 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_e
 #define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9)

 struct xen_domctl_mem_sharing_op {
-    uint8_t op;                  /* XEN_DOMCTL_MEM_EVENT_OP_* */
+    uint8_t op;                  /* XEN_DOMCTL_MEM_EVENT_OP_* */

     union {
-        int enable;                       /* for OP_CONTROL            */
-
-        struct mem_sharing_op_nominate {  /* for OP_NOMINATE           */
+        uint8_t enable;                   /* OP_CONTROL                */
+
+        struct mem_sharing_op_nominate {  /* OP_NOMINATE_xxx           */
             union {
-                unsigned long gfn;        /* IN: gfn to nominate       */
+                uint64_aligned_t gfn;     /* IN: gfn to nominate       */
                 uint32_t grant_ref;       /* IN: grant ref to nominate */
-            };
-            uint64_t handle;              /* OUT: the handle           */
+            } u;
+            uint64_aligned_t handle;      /* OUT: the handle           */
         } nominate;

-        struct mem_sharing_op_share {
-            uint64_t source_handle;       /* IN: handle to the source page */
-            uint64_t client_handle;       /* IN: handle to the client page */
+        struct mem_sharing_op_share {     /* OP_SHARE */
+            uint64_aligned_t source_handle; /* IN: handle to the source page */
+            uint64_aligned_t client_handle; /* IN: handle to the client page */
         } share;

-        struct mem_sharing_op_debug {
+        struct mem_sharing_op_debug {     /* OP_DEBUG_xxx */
            union {
-                unsigned long gfn;        /* IN: gfn to debug          */
-                unsigned long mfn;        /* IN: mfn to debug          */
+                uint64_aligned_t gfn;     /* IN: gfn to debug          */
+                uint64_aligned_t mfn;     /* IN: mfn to debug          */
                 grant_ref_t gref;         /* IN: gref to debug         */
-            };
+            } u;
         } debug;
-    };
+    } u;
 };
 typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
diff -r 3c93fffa6502 -r bde24590c13a xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h       Mon Dec 21 16:51:40 2009 +0000
+++ b/xen/include/public/sysctl.h       Tue Dec 22 11:33:15 2009 +0000
@@ -403,7 +403,7 @@ struct xen_sysctl_pm_op {
         uint32_t set_max_cstate;
         uint32_t get_vcpu_migration_delay;
         uint32_t set_vcpu_migration_delay;
-    };
+    } u;
 };

 #define XEN_SYSCTL_page_offline_op        14
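Because fields in these public structures change type (and, on 32-bit tool stacks, size and offset), the patch also bumps XEN_DOMCTL_INTERFACE_VERSION from 0x00000005 to 0x00000006; the hypervisor compares the version supplied with each domctl against its own and refuses mismatched callers. A stand-alone sketch of that guard, assuming a made-up check_domctl_version() helper rather than the real Xen entry-point code:

    #include <stdint.h>
    #include <stdio.h>

    #define XEN_DOMCTL_INTERFACE_VERSION 0x00000006u  /* value after this patch */

    /* Hypothetical helper mirroring, conceptually, the version check the
     * hypervisor performs before acting on a domctl hypercall. */
    static int check_domctl_version(uint32_t caller_version)
    {
        return (caller_version == XEN_DOMCTL_INTERFACE_VERSION) ? 0 : -1;
    }

    int main(void)
    {
        printf("0x%08x accepted: %s\n", 0x00000005u,
               check_domctl_version(0x00000005u) == 0 ? "yes" : "no");
        printf("0x%08x accepted: %s\n", 0x00000006u,
               check_domctl_version(0x00000006u) == 0 ? "yes" : "no");
        return 0;
    }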
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog