[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen staging-4.12] xen/evtchn: revert 52e1fc47abc3a0123
commit 1dd870ec2b6080d67944fd02d2d548716d7551bb
Author: Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Tue Dec 1 15:46:00 2020 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Dec 1 15:46:00 2020 +0100
xen/evtchn: revert 52e1fc47abc3a0123
With the event channel lock no longer disabling interrupts commit
52e1fc47abc3a0123 ("evtchn/Flask: pre-allocate node on send path") can
be reverted again.
Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
master commit: b5ad37f8e9284cc147218f7a5193d739ae7b956f
master date: 2020-11-10 14:37:15 +0100
---
xen/common/event_channel.c | 6 ----
xen/include/xsm/xsm.h | 1 -
xen/xsm/flask/avc.c | 78 ++++-----------------------------------------
xen/xsm/flask/hooks.c | 10 ------
xen/xsm/flask/include/avc.h | 2 --
5 files changed, 7 insertions(+), 90 deletions(-)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 181e5abaa6..45852c07ba 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -744,12 +744,6 @@ int evtchn_send(struct domain *ld, unsigned int lport)
if ( !port_is_valid(ld, lport) )
return -EINVAL;
- /*
- * As the call further down needs to avoid allocations (due to running
- * with IRQs off), give XSM a chance to pre-allocate if needed.
- */
- xsm_evtchn_send(XSM_HOOK, ld, NULL);
-
lchn = evtchn_from_port(ld, lport);
evtchn_read_lock(lchn);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 56cfbbe2d8..fc9d6b5bf0 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -59,7 +59,6 @@ struct xsm_operations {
int (*evtchn_interdomain) (struct domain *d1, struct evtchn *chn1,
struct domain *d2, struct evtchn
*chn2);
void (*evtchn_close_post) (struct evtchn *chn);
- /* Note: Next hook may be called with 'chn' set to NULL. See call site. */
int (*evtchn_send) (struct domain *d, struct evtchn *chn);
int (*evtchn_status) (struct domain *d, struct evtchn *chn);
int (*evtchn_reset) (struct domain *d1, struct domain *d2);
diff --git a/xen/xsm/flask/avc.c b/xen/xsm/flask/avc.c
index 7c170f520a..640c708659 100644
--- a/xen/xsm/flask/avc.c
+++ b/xen/xsm/flask/avc.c
@@ -24,9 +24,7 @@
#include <xen/prefetch.h>
#include <xen/kernel.h>
#include <xen/sched.h>
-#include <xen/cpu.h>
#include <xen/init.h>
-#include <xen/percpu.h>
#include <xen/rcupdate.h>
#include <asm/atomic.h>
#include <asm/current.h>
@@ -343,79 +341,17 @@ static inline int avc_reclaim_node(void)
return ecx;
}
-static struct avc_node *new_node(void)
-{
- struct avc_node *node = xzalloc(struct avc_node);
-
- if ( node )
- {
- INIT_RCU_HEAD(&node->rhead);
- INIT_HLIST_NODE(&node->list);
- avc_cache_stats_incr(allocations);
- }
-
- return node;
-}
-
-/*
- * avc_has_perm_noaudit() may consume up to two nodes, which we may not be
- * able to obtain from the allocator at that point. Since the is merely
- * about caching earlier decisions, allow for (just) one pre-allocated node.
- */
-static DEFINE_PER_CPU(struct avc_node *, prealloc_node);
-
-void avc_prealloc(void)
-{
- struct avc_node **prealloc = &this_cpu(prealloc_node);
-
- if ( !*prealloc )
- *prealloc = new_node();
-}
-
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct avc_node **prealloc = &per_cpu(prealloc_node, cpu);
-
- if ( action == CPU_DEAD && *prealloc )
- {
- xfree(*prealloc);
- *prealloc = NULL;
- avc_cache_stats_incr(frees);
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block cpu_nfb = {
- .notifier_call = cpu_callback,
- .priority = 99
-};
-
-static int __init cpu_nfb_init(void)
-{
- register_cpu_notifier(&cpu_nfb);
- return 0;
-}
-__initcall(cpu_nfb_init);
-
static struct avc_node *avc_alloc_node(void)
{
- struct avc_node *node, **prealloc = &this_cpu(prealloc_node);
+ struct avc_node *node;
- node = *prealloc;
- *prealloc = NULL;
+ node = xzalloc(struct avc_node);
+ if (!node)
+ goto out;
- if ( !node )
- {
- /* Must not call xmalloc() & Co with IRQs off. */
- if ( !local_irq_is_enabled() )
- goto out;
- node = new_node();
- if ( !node )
- goto out;
- }
+ INIT_RCU_HEAD(&node->rhead);
+ INIT_HLIST_NODE(&node->list);
+ avc_cache_stats_incr(allocations);
atomic_inc(&avc_cache.active_nodes);
if ( atomic_read(&avc_cache.active_nodes) > avc_cache_threshold )
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 277fb66e91..3d00c747f6 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -280,16 +280,6 @@ static int flask_evtchn_send(struct domain *d, struct
evtchn *chn)
{
int rc;
- /*
- * When called with non-NULL chn, memory allocation may not be permitted.
- * Allow AVC to preallocate nodes as necessary upon early notification.
- */
- if ( !chn )
- {
- avc_prealloc();
- return 0;
- }
-
switch ( chn->state )
{
case ECS_INTERDOMAIN:
diff --git a/xen/xsm/flask/include/avc.h b/xen/xsm/flask/include/avc.h
index 32ecc0ea2a..93386bd7a1 100644
--- a/xen/xsm/flask/include/avc.h
+++ b/xen/xsm/flask/include/avc.h
@@ -90,8 +90,6 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32
requested,
int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested,
struct avc_audit_data *auditdata);
-void avc_prealloc(void);
-
/* Exported to selinuxfs */
struct xen_flask_hash_stats;
int avc_get_hash_stats(struct xen_flask_hash_stats *arg);
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.12
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.