
Re: [PATCH 1/3] Fix buffer overrun when suspending VMs with many vCPUs


  • To: win-pv-devel@xxxxxxxxxxxxxxxxxxxx
  • From: "Durrant, Paul" <xadimgnik@xxxxxxxxx>
  • Date: Wed, 7 Jun 2023 10:27:42 +0100
  • Delivery-date: Wed, 07 Jun 2023 09:27:51 +0000
  • List-id: Developer list for the Windows PV Drivers subproject <win-pv-devel.lists.xenproject.org>

On 25/05/2023 16:14, Owen Smith wrote:
Dynamically allocate the KDPC array when there are more active vCPUs than
would fit into the single PAGE_SIZE region that is pre-allocated.

__Section is defined as a PAGE_SIZE region, which can only contain a limited
number of KDPC objects in addition to the SYNC_CONTEXT header. When there is
insufficient space for a KDPC object per vCPU, dynamically allocate space for
the KDPC array instead. This prevents a buffer overrun when indexing the
array on VMs with 64 or more vCPUs, and allows the driver to scale to far
more than 64 vCPUs.
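
As a rough capacity check (a sketch only, assuming x64 where sizeof(KDPC)
is 0x40 bytes and a SYNC_CONTEXT header of about the same size; the exact
figures depend on the build):

    /* Sketch: how many KDPC objects fit in the pre-allocated page? */
    #include <stdio.h>

    #define PAGE_SIZE_BYTES     4096
    #define KDPC_SIZE           64   /* sizeof(KDPC) on x64 */
    #define SYNC_HEADER_SIZE    64   /* assumed SYNC_CONTEXT header size */

    int main(void)
    {
        /* Mirrors the MAX_DPCS_PER_PAGE calculation in the patch below. */
        unsigned max_dpcs = (PAGE_SIZE_BYTES - SYNC_HEADER_SIZE) / KDPC_SIZE;

        printf("KDPCs per page: %u\n", max_dpcs);   /* prints 63 */
        /* With 64 active vCPUs, Dpc[63] lands past the end of the page. */
        return 0;
    }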

Signed-off-by: Owen Smith <owen.smith@xxxxxxxxx>

Let's just dynamically allocate in all cases. I'll adjust accordingly.

Acked-by: Paul Durrant <paul@xxxxxxx>

---
  src/xenbus/sync.c | 50 ++++++++++++++++++++++++++++++++++++++++++++---
  1 file changed, 47 insertions(+), 3 deletions(-)

diff --git a/src/xenbus/sync.c b/src/xenbus/sync.c
index fdfaf2d..4418c8e 100644
--- a/src/xenbus/sync.c
+++ b/src/xenbus/sync.c
@@ -96,12 +96,43 @@ typedef struct  _SYNC_CONTEXT {
      LONG            ProcessorCount;
      SYNC_REQUEST    Request;
      LONG            CompletionCount;
+    PKDPC           Dpcs;
      KDPC            Dpc[1];
  } SYNC_CONTEXT, *PSYNC_CONTEXT;

  static PSYNC_CONTEXT SyncContext = (PVOID)__Section;
  static LONG             SyncOwner = -1;

+#define MAX_DPCS_PER_PAGE ((PAGE_SIZE - sizeof(SYNC_CONTEXT)) / sizeof(KDPC))
+#define SYNC_POOL_TAG       'CNYS'
+
+static FORCEINLINE PVOID
+__SyncAllocate(
+    IN  ULONG   Size
+    )
+{
+    PVOID       Buffer;
+
+    if (Size == 0)
+        return NULL;
+
+    Buffer = ExAllocatePoolWithTag(NonPagedPool,
+                                   Size,
+                                   SYNC_POOL_TAG);
+    if (Buffer != NULL)
+        RtlZeroMemory(Buffer, Size);
+
+    return Buffer;
+}
+
+static FORCEINLINE VOID
+__SyncFree(
+    IN  PVOID   Buffer
+    )
+{
+    ExFreePoolWithTag(Buffer, SYNC_POOL_TAG);
+}
+
  static FORCEINLINE VOID
  __SyncAcquire(
      IN  LONG    Index
@@ -343,15 +374,23 @@ SyncCapture(
      Context->Late = Late;

      Context->CompletionCount = 0;
-    KeMemoryBarrier();
+
+    Context->Dpcs = &Context->Dpc[0];

      Context->ProcessorCount = KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
+    if (Context->ProcessorCount >= MAX_DPCS_PER_PAGE) {
+        Context->Dpcs = __SyncAllocate(Context->ProcessorCount * sizeof(KDPC));
+        ASSERT(Context->Dpcs != NULL);
+    }
+
+    KeMemoryBarrier();

      for (Index = 0; Index < Context->ProcessorCount; Index++) {
-        PKDPC       Dpc = &Context->Dpc[Index];
+        PKDPC       Dpc = &Context->Dpcs[Index];
          NTSTATUS    status;

-        ASSERT3U((ULONG_PTR)(Dpc + 1), <, (ULONG_PTR)__Section + PAGE_SIZE);
+        // Imply Context->Dpcs == &Context->Dpc[0] ->
+        //  ASSERT3U((ULONG_PTR)(Dpc + 1), <, (ULONG_PTR)__Section + PAGE_SIZE);

      status = KeGetProcessorNumberFromIndex(Index, &ProcNumber);
          ASSERT(NT_SUCCESS(status));
@@ -489,6 +528,11 @@ SyncRelease(

      __SyncWait();

+    if (Context->ProcessorCount >= MAX_DPCS_PER_PAGE) {
+        ASSERT3P(Context->Dpcs, !=, &Context->Dpc[0]);
+        __SyncFree(Context->Dpcs);
+    }
+
      RtlZeroMemory(Context, PAGE_SIZE);

      __SyncRelease();
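
For reference, the "allocate in all cases" adjustment suggested above
might look roughly like this (a sketch only, reusing the __SyncAllocate/
__SyncFree helpers introduced by this patch; the fixed Dpc[1] member
would then be dropped from SYNC_CONTEXT):

    /* SyncCapture(): always allocate the KDPC array from non-paged pool. */
    Context->ProcessorCount = KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);

    Context->Dpcs = __SyncAllocate(Context->ProcessorCount * sizeof(KDPC));
    ASSERT(Context->Dpcs != NULL);

    KeMemoryBarrier();

    /* SyncRelease(): free the array unconditionally before zeroing the
     * context page. */
    __SyncFree(Context->Dpcs);
    Context->Dpcs = NULL;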