net: emac: remove IBM_EMAC_RX_SKB_HEADROOM
author    Christian Lamparter <chunkeey@gmail.com>
          Tue, 5 Feb 2019 21:20:09 +0000 (22:20 +0100)
committer David S. Miller <davem@davemloft.net>
          Thu, 7 Feb 2019 03:50:41 +0000 (19:50 -0800)
The EMAC driver had a custom IBM_EMAC_RX_SKB_HEADROOM
Kconfig option that reserved additional skb headroom for RX.
This patch removes the option and migrates the code
to use napi_alloc_skb() and netdev_alloc_skb_ip_align()
in its place.

Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
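
[Editor's note, not part of the commit: a minimal sketch of the allocation pattern the driver migrates to. The helpers napi_alloc_skb(), netdev_alloc_skb_ip_align() and __netdev_alloc_skb_ip_align() are the core networking APIs used in the diff below; the wrapper function rx_alloc_sketch() and its parameters are hypothetical and exist only for illustration.]

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /*
     * Hypothetical wrapper showing how the core helpers replace the old
     * alloc_skb() + skb_reserve(skb, headroom + 2) pattern.  Both helpers
     * return an skb whose data pointer already sits NET_SKB_PAD +
     * NET_IP_ALIGN bytes into the buffer, so the IP header of a received
     * Ethernet frame ends up naturally aligned.
     */
    static struct sk_buff *rx_alloc_sketch(struct net_device *ndev,
					   struct napi_struct *napi,
					   unsigned int len, bool in_napi)
    {
	    if (in_napi)
		    /* GFP_ATOMIC allocation meant for NAPI poll (softirq) context */
		    return napi_alloc_skb(napi, len);

	    /*
	     * GFP_ATOMIC by default; the open path in this patch instead calls
	     * __netdev_alloc_skb_ip_align(ndev, len, GFP_KERNEL), which may sleep.
	     */
	    return netdev_alloc_skb_ip_align(ndev, len);
    }

[Because the helpers handle the 2-byte IP alignment themselves, the driver now uses NET_IP_ALIGN directly when computing DMA addresses and copy offsets, as the hunks below show.]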
drivers/net/ethernet/ibm/emac/Kconfig
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/core.h

diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig
index 90d49191beb36880b58cf85c9c2e1cd8b41bad1d..eacf7e141fdc20cb665bcedcebcc521448f9b6f2 100644
--- a/drivers/net/ethernet/ibm/emac/Kconfig
+++ b/drivers/net/ethernet/ibm/emac/Kconfig
@@ -28,18 +28,6 @@ config IBM_EMAC_RX_COPY_THRESHOLD
        depends on IBM_EMAC
        default "256"
 
-config IBM_EMAC_RX_SKB_HEADROOM
-       int "Additional RX skb headroom (bytes)"
-       depends on IBM_EMAC
-       default "0"
-       help
-         Additional receive skb headroom. Note, that driver
-         will always reserve at least 2 bytes to make IP header
-         aligned, so usually there is no need to add any additional
-         headroom.
-
-         If unsure, set to 0.
-
 config IBM_EMAC_DEBUG
        bool "Debugging"
        depends on IBM_EMAC
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 209255495bc978e223f81700dc9cb7be1046f4cc..3c2a5759844adbf894ffba9d673d231e0c4c0dee 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1071,7 +1071,9 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
 
        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
-               struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
@@ -1080,10 +1082,10 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);
 
-               skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
-                   dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
-                                  DMA_FROM_DEVICE) + 2;
+                   dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+                                  rx_sync_size, DMA_FROM_DEVICE)
+                                  + NET_IP_ALIGN;
                dev->rx_skb[i] = skb;
        }
  skip:
@@ -1174,20 +1176,18 @@ static void emac_clean_rx_ring(struct emac_instance *dev)
        }
 }
 
-static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
-                                   gfp_t flags)
+static int
+__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
 {
-       struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;
 
        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;
 
-       skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
-           dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
-                          DMA_FROM_DEVICE) + 2;
+           dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+                          dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
        wmb();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
@@ -1195,6 +1195,27 @@ static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
        return 0;
 }
 
+static int
+emac_alloc_rx_skb(struct emac_instance *dev, int slot)
+{
+       struct sk_buff *skb;
+
+       skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
+                                         GFP_KERNEL);
+
+       return __emac_prepare_rx_skb(skb, dev, slot);
+}
+
+static int
+emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
+{
+       struct sk_buff *skb;
+
+       skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);
+
+       return __emac_prepare_rx_skb(skb, dev, slot);
+}
+
 static void emac_print_link_status(struct emac_instance *dev)
 {
        if (netif_carrier_ok(dev->ndev))
@@ -1225,7 +1246,7 @@ static int emac_open(struct net_device *ndev)
 
        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
-               if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
+               if (emac_alloc_rx_skb(dev, i)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
@@ -1660,8 +1681,9 @@ static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
        DBG2(dev, "recycle %d %d" NL, slot, len);
 
        if (len)
-               dma_map_single(&dev->ofdev->dev, skb->data - 2,
-                              EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
+               dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+                              SKB_DATA_ALIGN(len + NET_IP_ALIGN),
+                              DMA_FROM_DEVICE);
 
        dev->rx_desc[slot].data_len = 0;
        wmb();
@@ -1713,7 +1735,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;
 
-               if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
+               if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
@@ -1769,16 +1791,18 @@ static int emac_poll_rx(void *param, int budget)
                }
 
                if (len && len < EMAC_RX_COPY_THRESH) {
-                       struct sk_buff *copy_skb =
-                           alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
+                       struct sk_buff *copy_skb;
+
+                       copy_skb = napi_alloc_skb(&dev->mal->napi, len);
                        if (unlikely(!copy_skb))
                                goto oom;
 
-                       skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
-                       memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
+                       memcpy(copy_skb->data - NET_IP_ALIGN,
+                              skb->data - NET_IP_ALIGN,
+                              len + NET_IP_ALIGN);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
-               } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
+               } else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
                        goto oom;
 
                skb_put(skb, len);
@@ -1799,7 +1823,7 @@ static int emac_poll_rx(void *param, int budget)
        sg:
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
-                       if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
+                       if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
                                DBG(dev, "rx OOM %d" NL, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 84caa4a3fc52c103f52cb0b47e8f897ea262f026..187689cd8212e144892f6f11a9b694fc8042519b 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -68,22 +68,18 @@ static inline int emac_rx_size(int mtu)
                return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
 }
 
-#define EMAC_DMA_ALIGN(x)              ALIGN((x), dma_get_cache_alignment())
-
-#define EMAC_RX_SKB_HEADROOM           \
-       EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
-
 /* Size of RX skb for the given MTU */
 static inline int emac_rx_skb_size(int mtu)
 {
        int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
-       return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
+
+       return SKB_DATA_ALIGN(size + NET_IP_ALIGN) + NET_SKB_PAD;
 }
 
 /* RX DMA sync size */
 static inline int emac_rx_sync_size(int mtu)
 {
-       return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
+       return SKB_DATA_ALIGN(emac_rx_size(mtu) + NET_IP_ALIGN);
 }
 
 /* Driver statistcs is split into two parts to make it more cache friendly: