[Xen-changelog] [xen-unstable] vtd: do FLR before xc.domain_destroy()
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1219756299 -3600
# Node ID c5a7ceb199cde8a1181aad6059ad17af8176ab05
# Parent  493a0a87919e4403d88dc716254fbdf646afa2f1
vtd: do FLR before xc.domain_destroy()

When we "xm destroy" a guest with assigned devices,
xen/common/domain.c:domain_kill() relinquishes the guest's memory first
and only then invokes domain_destroy() -> complete_domain_destroy() ->
arch_domain_destroy() -> pci_release_devices()/iommu_domain_destroy().
Between relinquish_memory() and domain_destroy(), an assigned device may
still issue DMA requests that access the guest's already-relinquished
memory. We close this window by invoking do_FLR() to quiesce the
devices before xc.domain_destroy().

Signed-off-by: Dexuan Cui <dexuan.cui@xxxxxxxxx>
---
 tools/python/xen/util/pci.py                  |   37 +++++++++++++++++++++++---
 tools/python/xen/xend/XendDomain.py           |    4 ++
 tools/python/xen/xend/XendDomainInfo.py       |   24 ++++++++++++++++
 tools/python/xen/xend/server/DevController.py |    8 -----
 tools/python/xen/xend/server/pciif.py         |   25 +++--------------
 5 files changed, 67 insertions(+), 31 deletions(-)

diff -r 493a0a87919e -r c5a7ceb199cd tools/python/xen/util/pci.py
--- a/tools/python/xen/util/pci.py      Tue Aug 26 14:10:17 2008 +0100
+++ b/tools/python/xen/util/pci.py      Tue Aug 26 14:11:39 2008 +0100
@@ -40,6 +40,7 @@ DEV_TYPE_PCI_BRIDGE = 2
 DEV_TYPE_PCI_BRIDGE = 2
 DEV_TYPE_PCI = 3
 
+PCI_VENDOR_ID = 0x0
 PCI_STATUS = 0x6
 PCI_CLASS_DEVICE = 0x0a
 PCI_CLASS_BRIDGE_PCI = 0x0604
@@ -68,6 +69,11 @@ PCI_PM_CTRL_NO_SOFT_RESET = 0x0004
 PCI_PM_CTRL_NO_SOFT_RESET = 0x0004
 PCI_PM_CTRL_STATE_MASK = 0x0003
 PCI_D3hot = 3
+
+VENDOR_INTEL = 0x8086
+PCI_CAP_ID_VENDOR_SPECIFIC_CAP = 0x09
+PCI_CLASS_ID_USB = 0x0c03
+PCI_USB_FLRCTRL = 0x4
 
 PCI_CAP_ID_AF = 0x13
 PCI_AF_CAPs = 0x3
@@ -487,7 +493,7 @@ class PciDevice:
     def do_Dstate_transition(self):
         pos = self.find_cap_offset(PCI_CAP_ID_PM)
         if pos == 0:
-            return
+            return False
 
         (pci_list, cfg_list) = save_pci_conf_space([self.name])
 
@@ -504,6 +510,31 @@ class PciDevice:
         time.sleep(0.010)
 
         restore_pci_conf_space((pci_list, cfg_list))
+        return True
+
+    def do_vendor_specific_FLR_method(self):
+        pos = self.find_cap_offset(PCI_CAP_ID_VENDOR_SPECIFIC_CAP)
+        if pos == 0:
+            return
+
+        vendor_id = self.pci_conf_read16(PCI_VENDOR_ID)
+        if vendor_id != VENDOR_INTEL:
+            return
+
+        class_id = self.pci_conf_read16(PCI_CLASS_DEVICE)
+        if class_id != PCI_CLASS_ID_USB:
+            return
+
+        (pci_list, cfg_list) = save_pci_conf_space([self.name])
+
+        self.pci_conf_write8(pos + PCI_USB_FLRCTRL, 1)
+        time.sleep(0.010)
+
+        restore_pci_conf_space((pci_list, cfg_list))
+
+    def do_FLR_for_integrated_device(self):
+        if not self.do_Dstate_transition():
+            self.do_vendor_specific_FLR_method()
 
     def find_all_the_multi_functions(self):
         sysfs_mnt = find_sysfs_mnt()
@@ -676,7 +707,7 @@ class PciDevice:
             restore_pci_conf_space((pci_list, cfg_list))
         else:
             if self.bus == 0:
-                self.do_Dstate_transition()
+                self.do_FLR_for_integrated_device()
             else:
                 funcs = self.find_all_the_multi_functions()
                 self.devs_check_driver(funcs)
@@ -697,7 +728,7 @@ class PciDevice:
             restore_pci_conf_space((pci_list, cfg_list))
         else:
             if self.bus == 0:
-                self.do_Dstate_transition()
+                self.do_FLR_for_integrated_device()
             else:
                 devs = self.find_coassigned_devices(False)
                 # Remove the element 0 which is a bridge

diff -r 493a0a87919e -r c5a7ceb199cd tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py      Tue Aug 26 14:10:17 2008 +0100
+++ b/tools/python/xen/xend/XendDomain.py      Tue Aug 26 14:11:39 2008 +0100
@@ -419,6 +419,8 @@ class XendDomain:
             except VmError:
                 log.exception("Unable to recreate domain")
                 try:
+                    xc.domain_pause(domid)
+                    do_FLR(domid)
                     xc.domain_destroy(domid)
                 except:
                     log.exception("Hard destruction of domain failed: %d" %
@@ -1255,6 +1257,8 @@ class XendDomain:
                 val = dominfo.destroy()
             else:
                 try:
+                    xc.domain_pause(int(domid))
+                    do_FLR(int(domid))
                     val = xc.domain_destroy(int(domid))
                 except ValueError:
                     raise XendInvalidDomain(domid)

diff -r 493a0a87919e -r c5a7ceb199cd tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py      Tue Aug 26 14:10:17 2008 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py      Tue Aug 26 14:11:39 2008 +0100
@@ -287,6 +287,28 @@ def dom_get(dom):
         log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
     return None
 
+def do_FLR(domid):
+    from xen.xend.server.pciif import parse_pci_name, PciDevice
+    path = '/local/domain/0/backend/pci/%u/0/' % domid
+    num_devs = xstransact.Read(path + 'num_devs');
+    if num_devs is None or num_devs == "":
+        return;
+
+    num_devs = int(xstransact.Read(path + 'num_devs'));
+
+    dev_str_list = []
+    for i in range(num_devs):
+        dev_str = xstransact.Read(path + 'dev-%i' % i)
+        dev_str_list = dev_str_list + [dev_str]
+
+    for dev_str in dev_str_list:
+        (dom, b, d, f) = parse_pci_name(dev_str)
+        try:
+            dev = PciDevice(dom, b, d, f)
+        except Exception, e:
+            raise VmError("pci: failed to locate device and "+
+                    "parse it's resources - "+str(e))
+        dev.do_FLR()
 
 class XendDomainInfo:
     """An object represents a domain.
@@ -2410,6 +2432,8 @@ class XendDomainInfo:
         try:
             if self.domid is not None:
                 xc.domain_destroy_hook(self.domid)
+                xc.domain_pause(self.domid)
+                do_FLR(self.domid)
                 xc.domain_destroy(self.domid)
                 for state in DOM_STATES_OLD:
                     self.info[state] = 0

diff -r 493a0a87919e -r c5a7ceb199cd tools/python/xen/xend/server/DevController.py
--- a/tools/python/xen/xend/server/DevController.py      Tue Aug 26 14:10:17 2008 +0100
+++ b/tools/python/xen/xend/server/DevController.py      Tue Aug 26 14:11:39 2008 +0100
@@ -223,12 +223,6 @@ class DevController:
         raise VmError('%s devices may not be reconfigured' %
                       self.deviceClass)
 
-    def cleanupDeviceOnDomainDestroy(self, devid):
-        """ Some devices may need special cleanup when the guest domain
-            is destroyed.
-        """
-        return
-
     def destroyDevice(self, devid, force):
         """Destroy the specified device.
@@ -244,8 +238,6 @@ class DevController:
         """
 
         dev = self.convertToDeviceNumber(devid)
-
-        self.cleanupDeviceOnDomainDestroy(dev)
 
         # Modify online status /before/ updating state (latter is watched by
         # drivers, so this ordering avoids a race).

diff -r 493a0a87919e -r c5a7ceb199cd tools/python/xen/xend/server/pciif.py
--- a/tools/python/xen/xend/server/pciif.py      Tue Aug 26 14:10:17 2008 +0100
+++ b/tools/python/xen/xend/server/pciif.py      Tue Aug 26 14:11:39 2008 +0100
@@ -383,10 +383,10 @@ class PciController(DevController):
         if (dev.dev_type == DEV_TYPE_PCIe_ENDPOINT) and not dev.pcie_flr:
             if dev.bus == 0:
                 # We cope with this case by using the Dstate transition
-                # method for now.
+                # method or some vendor specific methods for now.
                 err_msg = 'pci: %s: it is on bus 0, but has no PCIe' +\
                     ' FLR Capability. Will try the Dstate transition'+\
-                    ' method if available.'
+                    ' method or some vendor specific methods if available.'
                 log.warn(err_msg % dev.name)
             else:
                 funcs = dev.find_all_the_multi_functions()
@@ -404,10 +404,11 @@ class PciController(DevController):
             if dev.bus == 0 or arch.type == "ia64":
                 if not dev.pci_af_flr:
                     # We cope with this case by using the Dstate transition
-                    # method for now.
+                    # method or some vendor specific methods for now.
                     err_msg = 'pci: %s: it is on bus 0, but has no PCI' +\
                         ' Advanced Capabilities for FLR. Will try the'+\
-                        ' Dstate transition method if available.'
+                        ' Dstate transition method or some vendor' +\
+                        ' specific methods if available.'
                     log.warn(err_msg % dev.name)
             else:
                 # All devices behind the uppermost PCI/PCI-X bridge must be\
@@ -543,22 +544,6 @@ class PciController(DevController):
 
         return new_num_devs
 
-    def cleanupDeviceOnDomainDestroy(self, devid):
-        num_devs = int(self.readBackend(devid, 'num_devs'))
-        dev_str_list = []
-        for i in range(num_devs):
-            dev_str = self.readBackend(devid, 'dev-%i' % i)
-            dev_str_list = dev_str_list + [dev_str]
-
-        for dev_str in dev_str_list:
-            (dom, b, d, f) = parse_pci_name(dev_str)
-            try:
-                dev = PciDevice(dom, b, d, f)
-            except Exception, e:
-                raise VmError("pci: failed to locate device and "+
-                        "parse it's resources - "+str(e))
-            dev.do_FLR()
-
     def waitForBackend(self,devid):
        return (0, "ok - no hotplug")
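
The change is easiest to read as an ordering constraint: every
hard-destroy path touched above (in XendDomain.py and XendDomainInfo.py)
now performs the same three steps in the same sequence. The following is
a minimal standalone sketch of that sequence; the three stub functions
are illustrative stand-ins for xc.domain_pause(), do_FLR() and
xc.domain_destroy(), not the real Xend bindings:

    # Sketch only: stand-ins for the xc hypercall bindings and the
    # do_FLR() helper added by this patch.

    def xc_domain_pause(domid):
        # Freeze the guest's vCPUs so it cannot program new DMA.
        print "pausing domain %d" % domid

    def do_FLR(domid):
        # Function-Level-Reset every PCI device assigned to the guest,
        # quiescing any DMA still in flight.
        print "resetting assigned devices of domain %d" % domid

    def xc_domain_destroy(domid):
        # Only now is it safe to tear the domain down and free memory.
        print "destroying domain %d" % domid

    def hard_destroy(domid):
        # Order matters: pause first, then reset, then destroy.
        xc_domain_pause(domid)
        do_FLR(domid)
        xc_domain_destroy(domid)

    hard_destroy(1)

Pausing before the reset is what makes the reset stick: once the vCPUs
are frozen, the guest cannot re-arm DMA on a device after its FLR.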
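
do_Dstate_transition() itself appears above only through its changed
return statements; its body lies outside the hunk context. For readers
unfamiliar with the fallback it implements, here is a hypothetical
sketch (not the Xend code) of a D3hot reset cycle against the standard
PCI Power Management capability layout. read16/write16 are assumed
config-space accessors, and pm_cap_pos is the capability offset as
find_cap_offset(PCI_CAP_ID_PM) returns it:

    import time

    # Offsets follow the standard PCI PM capability layout; the state
    # mask and D3hot value match the constants defined in pci.py above.
    PCI_PM_CTRL = 4                  # PMCSR register in the PM capability
    PCI_PM_CTRL_STATE_MASK = 0x0003  # power-state field of PMCSR
    PCI_D3hot = 3
    PCI_D0 = 0

    def dstate_cycle(read16, write16, pm_cap_pos):
        # Cycle the function D0 -> D3hot -> D0. Many devices perform an
        # internal reset on the D3hot->D0 transition (the
        # PCI_PM_CTRL_NO_SOFT_RESET bit advertises the exceptions), so
        # config space must be saved before and restored after this,
        # as save_pci_conf_space()/restore_pci_conf_space() do above.
        for state in (PCI_D3hot, PCI_D0):
            pmcsr = read16(pm_cap_pos + PCI_PM_CTRL)
            pmcsr = (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | state
            write16(pm_cap_pos + PCI_PM_CTRL, pmcsr)
            time.sleep(0.010)   # 10 ms settle time, as the patch uses

If no PM capability exists at all, do_Dstate_transition() now returns
False, and do_FLR_for_integrated_device() falls through to the
Intel-specific USB FLR register written in the pci.py hunks.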