Re: [PATCH 1/2] Rate limit per DPC in Xenvif only.
On 20/05/2022 15:14, Martin Harvey wrote:
> From: Owen Smith <owen.smith@xxxxxxxxxx>
>
> Change the receive algorithm such that, for any particular receive DPC,
> only a fixed number of packets can be pushed up the stack. Under higher
> load conditions, another DPC is scheduled to process the remainder before
> any more packets are removed from the descriptor ring.
>
> This functionality is currently local to xenvif. A more complete solution,
> where xennet determines when to finish a batch of packets, will be
> included in a subsequent patch.
>
> Signed-off-by: Martin Harvey <Martin.Harvey@xxxxxxxxxx>
> Ported-by: Owen Smith <owen.smith@xxxxxxxxxx>
> ---
>  src/xenvif/receiver.c | 82 ++++++++++++++++++++++++++++++++-----------
>  1 file changed, 61 insertions(+), 21 deletions(-)
>
> diff --git a/src/xenvif/receiver.c b/src/xenvif/receiver.c
> index 505505e..f8bfdd4 100644
> --- a/src/xenvif/receiver.c
> +++ b/src/xenvif/receiver.c
> @@ -99,6 +99,8 @@ typedef struct _XENVIF_RECEIVER_RING {
>      BOOLEAN                     Connected;
>      BOOLEAN                     Enabled;
>      BOOLEAN                     Stopped;
> +    BOOLEAN                     Backpressured;

BackPressured would be nicer. I'll tweak it.

> +    BOOLEAN                     FinalFlush;
>      XENVIF_VIF_OFFLOAD_OPTIONS  OffloadOptions;
>      ULONG                       BackfillSize;
>      PXENBUS_DEBUG_CALLBACK      DebugCallback;
> @@ -148,6 +150,8 @@ struct _XENVIF_RECEIVER {
>
>  #define XENVIF_RECEIVER_TAG 'ECER'
>
> +#define RX_BUFFERING_MAX 2048 // X-ref XenNet IN_NDIS_MAX (half in NDIS half elsewhere)
> +

I don't tend to use abbreviations in comments. I'll fix that up for style.

>  static FORCEINLINE PVOID
>  __ReceiverAllocate(
>      IN  ULONG   Length
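For anyone skimming the thread, what RX_BUFFERING_MAX buys is, roughly,
"indicate at most a fixed number of packets per DPC pass, then re-queue the
DPC for whatever is left". Below is a minimal sketch of that pattern only;
the names (EXAMPLE_RING, ExampleQueueDpc, BATCH_LIMIT) are invented for
illustration and are not the xenvif symbols, and the real code additionally
has to coordinate with the ring poll and the final flush in teardown.

/*
 * Illustrative sketch of the "rate limit per DPC" pattern: indicate at
 * most BATCH_LIMIT packets per DPC pass, and re-queue the DPC to handle
 * any remainder. Invented names; not the xenvif code.
 */
#include <ntddk.h>

#define BATCH_LIMIT 2048    // Plays the role of RX_BUFFERING_MAX.

typedef struct _EXAMPLE_RING {
    KDPC        QueueDpc;
    LIST_ENTRY  PacketComplete;     // Packets ready to indicate upstream.
    BOOLEAN     Backpressured;      // Set while a backlog remains.
} EXAMPLE_RING, *PEXAMPLE_RING;

VOID
ExampleQueueDpc(
    IN  PKDPC   Dpc,
    IN  PVOID   Context,
    IN  PVOID   Argument1,
    IN  PVOID   Argument2
    )
{
    PEXAMPLE_RING   Ring = Context;
    ULONG           Queued = 0;

    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(Argument1);
    UNREFERENCED_PARAMETER(Argument2);

    // Indicate at most BATCH_LIMIT packets in this DPC pass.
    while (!IsListEmpty(&Ring->PacketComplete) && Queued < BATCH_LIMIT) {
        PLIST_ENTRY ListEntry = RemoveHeadList(&Ring->PacketComplete);

        RtlZeroMemory(ListEntry, sizeof (LIST_ENTRY));
        // ... hand the containing packet to the upper layer here ...
        Queued++;
    }

    if (!IsListEmpty(&Ring->PacketComplete)) {
        // Backlog remains: stay backpressured and take another pass from
        // the back of the DPC queue rather than draining it all now.
        Ring->Backpressured = TRUE;
        KeInsertQueueDpc(&Ring->QueueDpc, NULL, NULL);
    } else {
        // Drained: let the ring poll start filling PacketComplete again.
        Ring->Backpressured = FALSE;
    }
}

VOID
ExampleRingInitialize(
    IN  PEXAMPLE_RING   Ring
    )
{
    InitializeListHead(&Ring->PacketComplete);
    KeInitializeDpc(&Ring->QueueDpc, ExampleQueueDpc, Ring);
}

The point of re-queueing rather than looping until empty is that the DPC
goes to the back of the DPC queue, so other deferred work gets a look-in
under sustained load, which is what the "Re-run remainder from back of DPC
queue" comment in the patch is getting at.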
> @@ -1338,43 +1342,53 @@ __ReceiverRingSwizzle(
>      PXENVIF_VIF_CONTEXT     Context;
>      LIST_ENTRY              List;
>      PLIST_ENTRY             ListEntry;
> +    BOOLEAN                 AllFlushed;
> +    ULONG                   PushedUpstream;

I think this would be better named 'Queued'.

>
>      Receiver = Ring->Receiver;
>      Frontend = Receiver->Frontend;
>      Context = PdoGetVifContext(FrontendGetPdo(Frontend));
>
> +    AllFlushed = TRUE;
> +    PushedUpstream = 0;
>      InitializeListHead(&List);
>
> -    ListEntry = InterlockedExchangePointer(&Ring->PacketQueue, NULL);
> +    if (IsListEmpty(&Ring->PacketComplete) || Ring->FinalFlush)
> +    {
> +        ListEntry = InterlockedExchangePointer(&Ring->PacketQueue, NULL);
>
> -    // Packets are held in the queue in reverse order so that the most
> -    // recent is always head of the list. This is necessary to allow
> -    // addition to the list to be done atomically.
> +        // Packets are held in the queue in reverse order so that the most
> +        // recent is always head of the list. This is necessary to allow
> +        // addition to the list to be done atomically.
>
> -    while (ListEntry != NULL) {
> -        PLIST_ENTRY NextEntry;
> +        while (ListEntry != NULL) {
> +            PLIST_ENTRY NextEntry;
>
> -        NextEntry = ListEntry->Blink;
> -        ListEntry->Flink = ListEntry->Blink = ListEntry;
> +            NextEntry = ListEntry->Blink;
> +            ListEntry->Flink = ListEntry->Blink = ListEntry;
>
> -        InsertHeadList(&List, ListEntry);
> +            InsertHeadList(&List, ListEntry);
>
> -        ListEntry = NextEntry;
> -    }
> +            ListEntry = NextEntry;
> +        }
>
> -    while (!IsListEmpty(&List)) {
> -        PXENVIF_RECEIVER_PACKET Packet;
> +        while (!IsListEmpty(&List)) {
> +            PXENVIF_RECEIVER_PACKET Packet;
>
> -        ListEntry = RemoveHeadList(&List);
> -        ASSERT3P(ListEntry, !=, &List);
> +            ListEntry = RemoveHeadList(&List);
> +            ASSERT3P(ListEntry, !=, &List);
>
> -        RtlZeroMemory(ListEntry, sizeof (LIST_ENTRY));
> +            RtlZeroMemory(ListEntry, sizeof (LIST_ENTRY));
>
> -        Packet = CONTAINING_RECORD(ListEntry, XENVIF_RECEIVER_PACKET, ListEntry);
> -        ReceiverRingProcessPacket(Ring, Packet);
> +            Packet = CONTAINING_RECORD(ListEntry, XENVIF_RECEIVER_PACKET, ListEntry);
> +            ReceiverRingProcessPacket(Ring, Packet);
> +        }
> +    } else {
> +        AllFlushed = FALSE;
>      }
>
> -    while (!IsListEmpty(&Ring->PacketComplete)) {
> +    while (!IsListEmpty(&Ring->PacketComplete) &&
> +           ((PushedUpstream < RX_BUFFERING_MAX) || Ring->FinalFlush)) {
>          PXENVIF_RECEIVER_PACKET Packet;
>          PXENVIF_PACKET_INFO     Info;
>          PUCHAR                  BaseVa;
> @@ -1382,6 +1396,8 @@ __ReceiverRingSwizzle(
>          PETHERNET_ADDRESS       DestinationAddress;
>          ETHERNET_ADDRESS_TYPE   Type;
>
> +        PushedUpstream++;
> +
>          ListEntry = RemoveHeadList(&Ring->PacketComplete);
>          ASSERT3P(ListEntry, !=, &Ring->PacketComplete);
>
> @@ -1537,9 +1553,29 @@ __ReceiverRingSwizzle(
>                                Packet->TagControlInformation,
>                                &Packet->Info,
>                                &Packet->Hash,
> -                              !IsListEmpty(&Ring->PacketComplete) ? TRUE : FALSE,
> +                              ((!IsListEmpty(&Ring->PacketComplete)) &&
> +                               ((PushedUpstream < RX_BUFFERING_MAX) || Ring->FinalFlush)) ? TRUE : FALSE,
>                                Packet);
>      }
> +
> +    if (!IsListEmpty(&Ring->PacketComplete))
> +        AllFlushed = FALSE;
> +
> +    if (!AllFlushed) {
> +        //Re-run remainder from back of DPC queue.
> +        Ring->Backpressured = TRUE;
> +        if (KeInsertQueueDpc(&Ring->QueueDpc, NULL, NULL))
> +            Ring->QueueDpcs++;
> +    } else {

I think Backpressured can be cleared unconditionally here, can't it? That
way you could assert that it is false after setting FinalFlush and queuing
the DPC, although the zero-check will pick that up automatically.

  Paul

> +        if ((Ring->Backpressured) && !Ring->FinalFlush) {
> +            //Not any more - restart dataflow from initial ring poll.
> +            Ring->Backpressured = FALSE;
> +
> +            //PollDpc zeroed before final flush, don't queue it here.
> +            if (KeInsertQueueDpc(&Ring->PollDpc, NULL, NULL))
> +                Ring->PollDpcs++;
> +        }
> +    }
>  }
>
>  static FORCEINLINE VOID
> @@ -1990,7 +2026,7 @@ ReceiverRingPoll(
>
>      Count = 0;
>
> -    if (!Ring->Enabled)
> +    if (!Ring->Enabled || (Ring->Backpressured && !Ring->FinalFlush))
>          goto done;
>
>      for (;;) {
> @@ -2963,8 +2999,12 @@ __ReceiverRingTeardown(
>      Ring->BackfillSize = 0;
>      Ring->OffloadOptions.Value = 0;
>
> +    Ring->FinalFlush = TRUE;
> +    KeInsertQueueDpc(&Ring->QueueDpc, NULL, NULL);
>      KeFlushQueuedDpcs();
>      RtlZeroMemory(&Ring->QueueDpc, sizeof (KDPC));
> +    Ring->Backpressured = FALSE;
> +    Ring->FinalFlush = FALSE;
>
>      ThreadAlert(Ring->WatchdogThread);
>      ThreadJoin(Ring->WatchdogThread);
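To make the Backpressured suggestion above concrete: the tail of
__ReceiverRingSwizzle() could be shaped roughly as below. This is only an
illustrative fragment of one possible arrangement, not the code as it will
be committed, and WasBackpressured is an invented local used purely for the
sketch.

    if (!AllFlushed) {
        //Re-run remainder from back of DPC queue.
        Ring->Backpressured = TRUE;
        if (KeInsertQueueDpc(&Ring->QueueDpc, NULL, NULL))
            Ring->QueueDpcs++;
    } else {
        BOOLEAN WasBackpressured = Ring->Backpressured;

        //Everything flushed: clear the flag unconditionally...
        Ring->Backpressured = FALSE;

        //...and only restart the ring poll when leaving a backpressured
        //state outside the final flush (PollDpc is zeroed before that).
        if (WasBackpressured && !Ring->FinalFlush) {
            if (KeInsertQueueDpc(&Ring->PollDpc, NULL, NULL))
                Ring->PollDpcs++;
        }
    }

__ReceiverRingTeardown() could then assert that the flag has been cleared
once the final-flush DPC has run, along these lines:

    Ring->FinalFlush = TRUE;
    KeInsertQueueDpc(&Ring->QueueDpc, NULL, NULL);
    KeFlushQueuedDpcs();
    RtlZeroMemory(&Ring->QueueDpc, sizeof (KDPC));
    ASSERT(!Ring->Backpressured);   // Cleared by the final flush above;
                                    // the existing zero-check would also
                                    // catch a stale flag.
    Ring->FinalFlush = FALSE;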