
[patch 20/30] net/mlx4: Replace irq_to_desc() abuse



No driver has any business with the internals of an interrupt
descriptor. Storing a pointer to it just to use yet another helper at the
actual usage site to retrieve the affinity mask is creative at best. Just
because C does not allow encapsulation does not mean that the kernel has no
limits.

Retrieve a pointer to the affinity mask itself and use that. It still
uses an interface which is not usually meant for random drivers, but it is
definitely less hideous than the previous hack.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Tariq Toukan <tariqt@xxxxxxxxxx>
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: Jakub Kicinski <kuba@xxxxxxxxxx>
Cc: netdev@xxxxxxxxxxxxxxx
Cc: linux-rdma@xxxxxxxxxxxxxxx
---
 drivers/net/ethernet/mellanox/mlx4/en_cq.c   |    8 +++-----
 drivers/net/ethernet/mellanox/mlx4/en_rx.c   |    6 +-----
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |    3 ++-
 3 files changed, 6 insertions(+), 11 deletions(-)
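
For reference, a condensed sketch of the resulting pattern (illustration
only, not part of the applied diff; abridged from the hunks below):

	int irq;

	/* At CQ activation time: look up the EQ's Linux interrupt number
	 * and cache a pointer to its affinity mask instead of the irq_desc.
	 */
	irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
	cq->aff_mask = irq_get_affinity_mask(irq);

	/* In the RX NAPI poll path: if the current CPU is still part of
	 * the IRQ's affinity mask, keep polling by returning the full
	 * budget; otherwise stop so the work can follow the IRQ.
	 */
	if (likely(cpumask_test_cpu(smp_processor_id(), cq->aff_mask)))
		return budget;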

--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -90,7 +90,7 @@ int mlx4_en_activate_cq(struct mlx4_en_p
                        int cq_idx)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
-       int err = 0;
+       int irq, err = 0;
        int timestamp_en = 0;
        bool assigned_eq = false;
 
@@ -116,10 +116,8 @@ int mlx4_en_activate_cq(struct mlx4_en_p
 
                        assigned_eq = true;
                }
-
-               cq->irq_desc =
-                       irq_to_desc(mlx4_eq_get_irq(mdev->dev,
-                                                   cq->vector));
+               irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
+               cq->aff_mask = irq_get_affinity_mask(irq);
        } else {
                /* For TX we use the same irq per
                ring we assigned for the RX    */
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -959,8 +959,6 @@ int mlx4_en_poll_rx_cq(struct napi_struc
 
        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget || !clean_complete) {
-               const struct cpumask *aff;
-               struct irq_data *idata;
                int cpu_curr;
 
                /* in case we got here because of !clean_complete */
@@ -969,10 +967,8 @@ int mlx4_en_poll_rx_cq(struct napi_struc
                INC_PERF_COUNTER(priv->pstats.napi_quota);
 
                cpu_curr = smp_processor_id();
-               idata = irq_desc_get_irq_data(cq->irq_desc);
-               aff = irq_data_get_affinity_mask(idata);
 
-               if (likely(cpumask_test_cpu(cpu_curr, aff)))
+               if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
                        return budget;
 
                /* Current cpu is not according to smp_irq_affinity -
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
 #endif
 #include <linux/cpu_rmap.h>
 #include <linux/ptp_clock_kernel.h>
+#include <linux/irq.h>
 #include <net/xdp.h>
 
 #include <linux/mlx4/device.h>
@@ -380,7 +381,7 @@ struct mlx4_en_cq {
        struct mlx4_cqe *buf;
 #define MLX4_EN_OPCODE_ERROR   0x1e
 
-       struct irq_desc *irq_desc;
+       const struct cpumask *aff_mask;
 };
 
 struct mlx4_en_port_profile {