1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
4 /* The driver transmit and receive code */
7 #include <linux/netdevice.h>
8 #include <linux/prefetch.h>
9 #include <linux/bpf_trace.h>
10 #include <net/dsfield.h>
13 #include "ice_txrx_lib.h"
16 #include "ice_trace.h"
17 #include "ice_dcb_lib.h"
19 #include "ice_eswitch.h"
21 #define ICE_RX_HDR_SIZE 256
23 #define FDIR_DESC_RXDID 0x40
24 #define ICE_FDIR_CLEAN_DELAY 10
27 * ice_prgm_fdir_fltr - Program a Flow Director filter
28 * @vsi: VSI to send dummy packet
29 * @fdir_desc: flow director descriptor
30 * @raw_packet: allocated buffer for flow director
33 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
36 struct ice_tx_buf *tx_buf, *first;
37 struct ice_fltr_desc *f_desc;
38 struct ice_tx_desc *tx_desc;
39 struct ice_tx_ring *tx_ring;
48 tx_ring = vsi->tx_rings[0];
49 if (!tx_ring || !tx_ring->desc)
53 /* we are using two descriptors to add/del a filter and we can wait */
54 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
57 msleep_interruptible(1);
60 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
63 if (dma_mapping_error(dev, dma))
66 /* grab the next descriptor */
67 i = tx_ring->next_to_use;
68 first = &tx_ring->tx_buf[i];
69 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
70 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
73 i = (i < tx_ring->count) ? i : 0;
74 tx_desc = ICE_TX_DESC(tx_ring, i);
75 tx_buf = &tx_ring->tx_buf[i];
78 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
80 memset(tx_buf, 0, sizeof(*tx_buf));
81 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
82 dma_unmap_addr_set(tx_buf, dma, dma);
84 tx_desc->buf_addr = cpu_to_le64(dma);
85 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
88 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
89 tx_buf->raw_buf = raw_packet;
91 tx_desc->cmd_type_offset_bsz =
92 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
94 /* Force memory write to complete before letting h/w know
95 * there are new descriptors to fetch.
99 /* mark the data descriptor to be watched */
100 first->next_to_watch = tx_desc;
102 writel(tx_ring->next_to_use, tx_ring->tail);
108 * ice_unmap_and_free_tx_buf - Release a Tx buffer
109 * @ring: the ring that owns the buffer
110 * @tx_buf: the buffer to free
113 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
116 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
117 devm_kfree(ring->dev, tx_buf->raw_buf);
118 else if (ice_ring_is_xdp(ring))
119 page_frag_free(tx_buf->raw_buf);
121 dev_kfree_skb_any(tx_buf->skb);
122 if (dma_unmap_len(tx_buf, len))
123 dma_unmap_single(ring->dev,
124 dma_unmap_addr(tx_buf, dma),
125 dma_unmap_len(tx_buf, len),
127 } else if (dma_unmap_len(tx_buf, len)) {
128 dma_unmap_page(ring->dev,
129 dma_unmap_addr(tx_buf, dma),
130 dma_unmap_len(tx_buf, len),
134 tx_buf->next_to_watch = NULL;
136 dma_unmap_len_set(tx_buf, len, 0);
137 /* tx_buf must be completely set up in the transmit path */
140 static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
142 return netdev_get_tx_queue(ring->netdev, ring->q_index);
146 * ice_clean_tx_ring - Free any empty Tx buffers
147 * @tx_ring: ring to be cleaned
149 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
154 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
155 ice_xsk_clean_xdp_ring(tx_ring);
159 /* ring already cleared, nothing to do */
160 if (!tx_ring->tx_buf)
163 /* Free all the Tx ring sk_buffs */
164 for (i = 0; i < tx_ring->count; i++)
165 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
168 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
170 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
172 /* Zero out the descriptor ring */
173 memset(tx_ring->desc, 0, size);
175 tx_ring->next_to_use = 0;
176 tx_ring->next_to_clean = 0;
177 tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
178 tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
180 if (!tx_ring->netdev)
183 /* cleanup Tx queue statistics */
184 netdev_tx_reset_queue(txring_txq(tx_ring));
188 * ice_free_tx_ring - Free Tx resources per queue
189 * @tx_ring: Tx descriptor ring for a specific queue
191 * Free all transmit software resources
193 void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
197 ice_clean_tx_ring(tx_ring);
198 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
199 tx_ring->tx_buf = NULL;
202 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
204 dmam_free_coherent(tx_ring->dev, size,
205 tx_ring->desc, tx_ring->dma);
206 tx_ring->desc = NULL;
211 * ice_clean_tx_irq - Reclaim resources after transmit completes
212 * @tx_ring: Tx ring to clean
213 * @napi_budget: Used to determine if we are in netpoll
215  * Returns true if there's any budget left (i.e. the clean is finished)
217 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
219 unsigned int total_bytes = 0, total_pkts = 0;
220 unsigned int budget = ICE_DFLT_IRQ_WORK;
221 struct ice_vsi *vsi = tx_ring->vsi;
222 s16 i = tx_ring->next_to_clean;
223 struct ice_tx_desc *tx_desc;
224 struct ice_tx_buf *tx_buf;
226 /* get the bql data ready */
227 netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
229 tx_buf = &tx_ring->tx_buf[i];
230 tx_desc = ICE_TX_DESC(tx_ring, i);
233 prefetch(&vsi->state);
236 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
238 /* if next_to_watch is not set then there is no work pending */
242 /* follow the guidelines of other drivers */
243 prefetchw(&tx_buf->skb->users);
245 smp_rmb(); /* prevent any other reads prior to eop_desc */
247 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
248 /* if the descriptor isn't done, no work yet to do */
249 if (!(eop_desc->cmd_type_offset_bsz &
250 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
253 /* clear next_to_watch to prevent false hangs */
254 tx_buf->next_to_watch = NULL;
256 /* update the statistics for this packet */
257 total_bytes += tx_buf->bytecount;
258 total_pkts += tx_buf->gso_segs;
261 napi_consume_skb(tx_buf->skb, napi_budget);
263 /* unmap skb header data */
264 dma_unmap_single(tx_ring->dev,
265 dma_unmap_addr(tx_buf, dma),
266 dma_unmap_len(tx_buf, len),
269 /* clear tx_buf data */
271 dma_unmap_len_set(tx_buf, len, 0);
273 /* unmap remaining buffers */
274 while (tx_desc != eop_desc) {
275 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
281 tx_buf = tx_ring->tx_buf;
282 tx_desc = ICE_TX_DESC(tx_ring, 0);
285 /* unmap any remaining paged data */
286 if (dma_unmap_len(tx_buf, len)) {
287 dma_unmap_page(tx_ring->dev,
288 dma_unmap_addr(tx_buf, dma),
289 dma_unmap_len(tx_buf, len),
291 dma_unmap_len_set(tx_buf, len, 0);
294 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
296 /* move us one more past the eop_desc for start of next pkt */
302 tx_buf = tx_ring->tx_buf;
303 tx_desc = ICE_TX_DESC(tx_ring, 0);
308 /* update budget accounting */
310 } while (likely(budget));
313 tx_ring->next_to_clean = i;
315 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
316 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
318 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
319 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
320 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
321 /* Make sure that anybody stopping the queue after this
322 * sees the new next_to_clean.
325 if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
326 !test_bit(ICE_VSI_DOWN, vsi->state)) {
327 netif_tx_wake_queue(txring_txq(tx_ring));
328 ++tx_ring->tx_stats.restart_q;
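/* TX_WAKE_THRESHOLD is twice DESC_NEEDED, the worst-case per-frame count
 * checked by ice_maybe_stop_tx() in the transmit path, so the queue is only
 * woken back up once roughly two full-sized frames worth of descriptors are
 * free again; this keeps a nearly-full ring from bouncing between stopped
 * and started on every completion.
 */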
336 * ice_setup_tx_ring - Allocate the Tx descriptors
337 * @tx_ring: the Tx ring to set up
339 * Return 0 on success, negative on error
341 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
343 struct device *dev = tx_ring->dev;
349 /* warn if we are about to overwrite the pointer */
350 WARN_ON(tx_ring->tx_buf);
352 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
354 if (!tx_ring->tx_buf)
357 /* round up to nearest page */
358 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
360 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
362 if (!tx_ring->desc) {
363 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
368 tx_ring->next_to_use = 0;
369 tx_ring->next_to_clean = 0;
370 tx_ring->tx_stats.prev_pkt = -1;
374 devm_kfree(dev, tx_ring->tx_buf);
375 tx_ring->tx_buf = NULL;
380 * ice_clean_rx_ring - Free Rx buffers
381 * @rx_ring: ring to be cleaned
383 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
385 struct device *dev = rx_ring->dev;
389 /* ring already cleared, nothing to do */
390 if (!rx_ring->rx_buf)
394 dev_kfree_skb(rx_ring->skb);
398 if (rx_ring->xsk_pool) {
399 ice_xsk_clean_rx_ring(rx_ring);
403 /* Free all the Rx ring sk_buffs */
404 for (i = 0; i < rx_ring->count; i++) {
405 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
410 /* Invalidate cache lines that may have been written to by
411 * device so that we avoid corrupting memory.
413 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
418 /* free resources associated with mapping */
419 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
420 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
421 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
424 rx_buf->page_offset = 0;
428 if (rx_ring->xsk_pool)
429 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
431 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
433 /* Zero out the descriptor ring */
434 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
436 memset(rx_ring->desc, 0, size);
438 rx_ring->next_to_alloc = 0;
439 rx_ring->next_to_clean = 0;
440 rx_ring->next_to_use = 0;
444 * ice_free_rx_ring - Free Rx resources
445 * @rx_ring: ring to clean the resources from
447 * Free all receive software resources
449 void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
453 ice_clean_rx_ring(rx_ring);
454 if (rx_ring->vsi->type == ICE_VSI_PF)
455 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
456 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
457 rx_ring->xdp_prog = NULL;
458 if (rx_ring->xsk_pool) {
459 kfree(rx_ring->xdp_buf);
460 rx_ring->xdp_buf = NULL;
462 kfree(rx_ring->rx_buf);
463 rx_ring->rx_buf = NULL;
467 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
469 dmam_free_coherent(rx_ring->dev, size,
470 rx_ring->desc, rx_ring->dma);
471 rx_ring->desc = NULL;
476 * ice_setup_rx_ring - Allocate the Rx descriptors
477 * @rx_ring: the Rx ring to set up
479 * Return 0 on success, negative on error
481 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
483 struct device *dev = rx_ring->dev;
489 /* warn if we are about to overwrite the pointer */
490 WARN_ON(rx_ring->rx_buf);
492 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
493 if (!rx_ring->rx_buf)
496 /* round up to nearest page */
497 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
499 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
501 if (!rx_ring->desc) {
502 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
507 rx_ring->next_to_use = 0;
508 rx_ring->next_to_clean = 0;
510 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
511 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
513 if (rx_ring->vsi->type == ICE_VSI_PF &&
514 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
515 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
516 rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
521 kfree(rx_ring->rx_buf);
522 rx_ring->rx_buf = NULL;
527 ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
529 unsigned int truesize;
531 #if (PAGE_SIZE < 8192)
532 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
534 truesize = rx_ring->rx_offset ?
535 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
536 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
537 SKB_DATA_ALIGN(size);
543 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
545 * @xdp: xdp_buff used as input to the XDP program
546 * @xdp_prog: XDP program to run
547 * @xdp_ring: ring to be used for XDP_TX action
549 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
552 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
553 struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
558 act = bpf_prog_run_xdp(xdp_prog, xdp);
563 if (static_branch_unlikely(&ice_xdp_locking_key))
564 spin_lock(&xdp_ring->tx_lock);
565 err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
566 if (static_branch_unlikely(&ice_xdp_locking_key))
567 spin_unlock(&xdp_ring->tx_lock);
568 if (err == ICE_XDP_CONSUMED)
572 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
575 return ICE_XDP_REDIR;
577 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
581 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
584 return ICE_XDP_CONSUMED;
589 * ice_xdp_xmit - submit packets to XDP ring for transmission
591 * @n: number of XDP frames to be transmitted
592 * @frames: XDP frames to be transmitted
593 * @flags: transmit flags
595 * Returns number of frames successfully sent. Failed frames
596  * will be freed by the XDP core.
597  * For error cases, a negative errno code is returned and no frames
598  * are transmitted (caller must handle freeing frames).
601 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
604 struct ice_netdev_priv *np = netdev_priv(dev);
605 unsigned int queue_index = smp_processor_id();
606 struct ice_vsi *vsi = np->vsi;
607 struct ice_tx_ring *xdp_ring;
610 if (test_bit(ICE_VSI_DOWN, vsi->state))
613 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
616 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
619 if (static_branch_unlikely(&ice_xdp_locking_key)) {
620 queue_index %= vsi->num_xdp_txq;
621 xdp_ring = vsi->xdp_rings[queue_index];
622 spin_lock(&xdp_ring->tx_lock);
624 xdp_ring = vsi->xdp_rings[queue_index];
627 for (i = 0; i < n; i++) {
628 struct xdp_frame *xdpf = frames[i];
631 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
632 if (err != ICE_XDP_TX)
637 if (unlikely(flags & XDP_XMIT_FLUSH))
638 ice_xdp_ring_update_tail(xdp_ring);
640 if (static_branch_unlikely(&ice_xdp_locking_key))
641 spin_unlock(&xdp_ring->tx_lock);
647 * ice_alloc_mapped_page - recycle or make a new page
648 * @rx_ring: ring to use
649 * @bi: rx_buf struct to modify
651  * Returns true if the page was successfully allocated or reused.
655 ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
657 struct page *page = bi->page;
660 /* since we are recycling buffers we should seldom need to alloc */
664 /* alloc new page for storage */
665 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
666 if (unlikely(!page)) {
667 rx_ring->rx_stats.alloc_page_failed++;
671 /* map page for use */
672 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
673 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
675 /* if mapping failed free memory back to system since
676 * there isn't much point in holding memory we can't use
678 if (dma_mapping_error(rx_ring->dev, dma)) {
679 __free_pages(page, ice_rx_pg_order(rx_ring));
680 rx_ring->rx_stats.alloc_page_failed++;
686 bi->page_offset = rx_ring->rx_offset;
687 page_ref_add(page, USHRT_MAX - 1);
688 bi->pagecnt_bias = USHRT_MAX;
694 * ice_alloc_rx_bufs - Replace used receive buffers
695 * @rx_ring: ring to place buffers on
696 * @cleaned_count: number of buffers to replace
698 * Returns false if all allocations were successful, true if any fail. Returning
699 * true signals to the caller that we didn't replace cleaned_count buffers and
700 * there is more work to do.
702 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
703 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
704 * multiple tail writes per call.
706 bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
708 union ice_32b_rx_flex_desc *rx_desc;
709 u16 ntu = rx_ring->next_to_use;
710 struct ice_rx_buf *bi;
712 /* do nothing if no valid netdev defined */
713 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
717 /* get the Rx descriptor and buffer based on next_to_use */
718 rx_desc = ICE_RX_DESC(rx_ring, ntu);
719 bi = &rx_ring->rx_buf[ntu];
722 /* if we fail here, we have work remaining */
723 if (!ice_alloc_mapped_page(rx_ring, bi))
726 /* sync the buffer for use by the device */
727 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
732 /* Refresh the desc even if buffer_addrs didn't change
733 * because each write-back erases this info.
735 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
740 if (unlikely(ntu == rx_ring->count)) {
741 rx_desc = ICE_RX_DESC(rx_ring, 0);
742 bi = rx_ring->rx_buf;
746 /* clear the status bits for the next_to_use descriptor */
747 rx_desc->wb.status_error0 = 0;
750 } while (cleaned_count);
752 if (rx_ring->next_to_use != ntu)
753 ice_release_rx_desc(rx_ring, ntu);
755 return !!cleaned_count;
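/* Typical hot-path usage, as in ice_clean_rx_irq() below: the caller
 * snapshots cleaned_count = ICE_DESC_UNUSED(rx_ring) once, runs its receive
 * loop, and then calls ice_alloc_rx_bufs() a single time, so per the comment
 * above the tail register is written at most once per napi poll.
 */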
759 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
760 * @rx_buf: Rx buffer to adjust
761 * @size: Size of adjustment
763 * Update the offset within page so that Rx buf will be ready to be reused.
764 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
765 * so the second half of page assigned to Rx buffer will be used, otherwise
766 * the offset is moved by "size" bytes
769 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
771 #if (PAGE_SIZE < 8192)
772 /* flip page offset to other buffer */
773 rx_buf->page_offset ^= size;
775 /* move offset up to the next cache line */
776 rx_buf->page_offset += size;
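/* Example with 4 KB pages: truesize is half a page (2048), so the XOR above
 * toggles page_offset between 0 and 2048 and the two halves of the page are
 * handed out alternately; with larger pages the offset simply advances by
 * truesize instead.
 */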
781 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
782 * @rx_buf: buffer containing the page
783 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
785 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
786 * which will assign the current buffer to the buffer that next_to_alloc is
787  * pointing to; otherwise, the DMA mapping needs to be destroyed and the page freed.
791 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
793 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
794 struct page *page = rx_buf->page;
796 /* avoid re-using remote and pfmemalloc pages */
797 if (!dev_page_is_reusable(page))
800 #if (PAGE_SIZE < 8192)
801 /* if we are only owner of page we can reuse it */
802 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
805 #define ICE_LAST_OFFSET \
806 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
807 if (rx_buf->page_offset > ICE_LAST_OFFSET)
809 #endif /* PAGE_SIZE < 8192 */
811 /* If we have drained the page fragment pool we need to update
812 * the pagecnt_bias and page count so that we fully restock the
813 * number of references the driver holds.
815 if (unlikely(pagecnt_bias == 1)) {
816 page_ref_add(page, USHRT_MAX - 1);
817 rx_buf->pagecnt_bias = USHRT_MAX;
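/* pagecnt_bias counts how many of the USHRT_MAX page references taken at
 * allocation time the driver still owns; each buffer handed up the stack is
 * paid for by decrementing the bias instead of touching the atomic page
 * refcount. The (refcount - bias) > 1 test above therefore means a
 * previously shared half of the page is still in flight somewhere, so the
 * page cannot be flipped for reuse yet, and restocking both values here
 * keeps the bias from ever draining to zero.
 */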
824 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
825 * @rx_ring: Rx descriptor ring to transact packets on
826 * @rx_buf: buffer containing page to add
827 * @skb: sk_buff to place the data into
828 * @size: packet length from rx_desc
830 * This function will add the data contained in rx_buf->page to the skb.
831 * It will just attach the page as a frag to the skb.
832 * The function will then update the page offset.
835 ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
836 struct sk_buff *skb, unsigned int size)
838 #if (PAGE_SIZE >= 8192)
839 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
841 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
846 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
847 rx_buf->page_offset, size, truesize);
849 /* page is being used so we must update the page offset */
850 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
854 * ice_reuse_rx_page - page flip buffer and store it back on the ring
855 * @rx_ring: Rx descriptor ring to store buffers on
856 * @old_buf: donor buffer to have page reused
858 * Synchronizes page for reuse by the adapter
861 ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
863 u16 nta = rx_ring->next_to_alloc;
864 struct ice_rx_buf *new_buf;
866 new_buf = &rx_ring->rx_buf[nta];
868 /* update, and store next to alloc */
870 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
872 /* Transfer page from old buffer to new buffer.
873 * Move each member individually to avoid possible store
874 * forwarding stalls and unnecessary copy of skb.
876 new_buf->dma = old_buf->dma;
877 new_buf->page = old_buf->page;
878 new_buf->page_offset = old_buf->page_offset;
879 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
883 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
884 * @rx_ring: Rx descriptor ring to transact packets on
885 * @size: size of buffer to add to skb
886 * @rx_buf_pgcnt: rx_buf page refcount
888 * This function will pull an Rx buffer from the ring and synchronize it
889 * for use by the CPU.
891 static struct ice_rx_buf *
892 ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
895 struct ice_rx_buf *rx_buf;
897 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
899 #if (PAGE_SIZE < 8192)
900 page_count(rx_buf->page);
904 prefetchw(rx_buf->page);
908 /* we are reusing so sync this buffer for CPU use */
909 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
910 rx_buf->page_offset, size,
913 /* We have pulled a buffer for use, so decrement pagecnt_bias */
914 rx_buf->pagecnt_bias--;
920 * ice_build_skb - Build skb around an existing buffer
921 * @rx_ring: Rx descriptor ring to transact packets on
922 * @rx_buf: Rx buffer to pull data from
923 * @xdp: xdp_buff pointing to the data
925 * This function builds an skb around an existing Rx buffer, taking care
926 * to set up the skb correctly and avoid any memcpy overhead.
928 static struct sk_buff *
929 ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
930 struct xdp_buff *xdp)
932 u8 metasize = xdp->data - xdp->data_meta;
933 #if (PAGE_SIZE < 8192)
934 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
936 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
937 SKB_DATA_ALIGN(xdp->data_end -
938 xdp->data_hard_start);
942 /* Prefetch first cache line of first page. If xdp->data_meta
943 * is unused, this points exactly as xdp->data, otherwise we
944 * likely have a consumer accessing first few bytes of meta
945 * data, and then actual data.
947 net_prefetch(xdp->data_meta);
948 /* build an skb around the page buffer */
949 skb = napi_build_skb(xdp->data_hard_start, truesize);
953 /* must record the Rx queue, otherwise OS features such as
954 * symmetric queue won't work
956 skb_record_rx_queue(skb, rx_ring->q_index);
958 /* update pointers within the skb to store the data */
959 skb_reserve(skb, xdp->data - xdp->data_hard_start);
960 __skb_put(skb, xdp->data_end - xdp->data);
962 skb_metadata_set(skb, metasize);
964 /* buffer is used by skb, update page_offset */
965 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
971 * ice_construct_skb - Allocate skb and populate it
972 * @rx_ring: Rx descriptor ring to transact packets on
973 * @rx_buf: Rx buffer to pull data from
974 * @xdp: xdp_buff pointing to the data
976 * This function allocates an skb. It then populates it with the page
977  * data from the current receive descriptor, taking care to set up the skb correctly.
980 static struct sk_buff *
981 ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
982 struct xdp_buff *xdp)
984 unsigned int metasize = xdp->data - xdp->data_meta;
985 unsigned int size = xdp->data_end - xdp->data;
986 unsigned int headlen;
989 /* prefetch first cache line of first page */
990 net_prefetch(xdp->data_meta);
992 /* allocate a skb to store the frags */
993 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
994 ICE_RX_HDR_SIZE + metasize,
995 GFP_ATOMIC | __GFP_NOWARN);
999 skb_record_rx_queue(skb, rx_ring->q_index);
1000 /* Determine available headroom for copy */
1002 if (headlen > ICE_RX_HDR_SIZE)
1003 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
1005 /* align pull length to size of long to optimize memcpy performance */
1006 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
1007 ALIGN(headlen + metasize, sizeof(long)));
1010 skb_metadata_set(skb, metasize);
1011 __skb_pull(skb, metasize);
1014 /* if we exhaust the linear part then add what is left as a frag */
1017 #if (PAGE_SIZE >= 8192)
1018 unsigned int truesize = SKB_DATA_ALIGN(size);
1020 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
1022 skb_add_rx_frag(skb, 0, rx_buf->page,
1023 rx_buf->page_offset + headlen, size, truesize);
1024 /* buffer is used by skb, update page_offset */
1025 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1027 /* buffer is unused, reset bias back to rx_buf; data was copied
1028 * onto skb's linear part so there's no need for adjusting
1029 * page offset and we can reuse this buffer as-is
1031 rx_buf->pagecnt_bias++;
1038 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1039 * @rx_ring: Rx descriptor ring to transact packets on
1040 * @rx_buf: Rx buffer to pull data from
1041 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
1043 * This function will update next_to_clean and then clean up the contents
1044 * of the rx_buf. It will either recycle the buffer or unmap it and free
1045 * the associated resources.
1048 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
1051 u16 ntc = rx_ring->next_to_clean + 1;
1053 /* fetch, update, and store next to clean */
1054 ntc = (ntc < rx_ring->count) ? ntc : 0;
1055 rx_ring->next_to_clean = ntc;
1060 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1061 /* hand second half of page back to the ring */
1062 ice_reuse_rx_page(rx_ring, rx_buf);
1064 /* we are not reusing the buffer so unmap it */
1065 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1066 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1068 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1071 /* clear contents of buffer_info */
1072 rx_buf->page = NULL;
1076 * ice_is_non_eop - process handling of non-EOP buffers
1077 * @rx_ring: Rx ring being processed
1078 * @rx_desc: Rx descriptor for current buffer
1080 * If the buffer is an EOP buffer, this function exits returning false,
1081 * otherwise return true indicating that this is in fact a non-EOP buffer.
1084 ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1086 /* if we are the last buffer then there is nothing else to do */
1087 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1088 if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
1091 rx_ring->rx_stats.non_eop_descs++;
1097 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1098 * @rx_ring: Rx descriptor ring to transact packets on
1099 * @budget: Total limit on number of packets to process
1101 * This function provides a "bounce buffer" approach to Rx interrupt
1102 * processing. The advantage to this is that on systems that have
1103 * expensive overhead for IOMMU access this provides a means of avoiding
1104 * it by maintaining the mapping of the page to the system.
1106 * Returns amount of work completed
1108 int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
1110 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1111 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1112 unsigned int offset = rx_ring->rx_offset;
1113 struct ice_tx_ring *xdp_ring = NULL;
1114 unsigned int xdp_res, xdp_xmit = 0;
1115 struct sk_buff *skb = rx_ring->skb;
1116 struct bpf_prog *xdp_prog = NULL;
1117 struct xdp_buff xdp;
1120 /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1121 #if (PAGE_SIZE < 8192)
1122 frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1124 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1126 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1128 xdp_ring = rx_ring->xdp_ring;
1130 /* start the loop to process Rx packets bounded by 'budget' */
1131 while (likely(total_rx_pkts < (unsigned int)budget)) {
1132 union ice_32b_rx_flex_desc *rx_desc;
1133 struct ice_rx_buf *rx_buf;
1134 unsigned char *hard_start;
1141 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1142 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1144 /* status_error_len will always be zero for unused descriptors
1145 * because it's cleared in cleanup, and overlaps with hdr_addr
1146 * which is always zero because packet split isn't used, if the
1147 * hardware wrote DD then it will be non-zero
1149 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1150 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
1153 /* This memory barrier is needed to keep us from reading
1154  * any other fields out of the rx_desc until we know the DD bit is set.
1159 ice_trace(clean_rx_irq, rx_ring, rx_desc);
1160 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1161 struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1163 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1165 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1166 ice_put_rx_buf(rx_ring, NULL, 0);
1171 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1172 ICE_RX_FLX_DESC_PKT_LEN_M;
1174 /* retrieve a buffer from the ring */
1175 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1179 xdp.data_end = NULL;
1180 xdp.data_hard_start = NULL;
1181 xdp.data_meta = NULL;
1185 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1187 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1188 #if (PAGE_SIZE > 4096)
1189 /* At larger PAGE_SIZE, frame_sz depends on the frame length */
1190 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1196 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
1199 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1200 xdp_xmit |= xdp_res;
1201 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1203 rx_buf->pagecnt_bias++;
1205 total_rx_bytes += size;
1209 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1213 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1214 } else if (likely(xdp.data)) {
1215 if (ice_ring_uses_build_skb(rx_ring))
1216 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1218 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1220 /* exit if we failed to retrieve a buffer */
1222 rx_ring->rx_stats.alloc_buf_failed++;
1224 rx_buf->pagecnt_bias++;
1228 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1231 /* skip if it is a NOP desc */
1232 if (ice_is_non_eop(rx_ring, rx_desc))
1235 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1236 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
1238 dev_kfree_skb_any(skb);
1242 vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
1244 /* pad the skb if needed, to make a valid ethernet frame */
1245 if (eth_skb_pad(skb)) {
1250 /* probably a little skewed due to removing CRC */
1251 total_rx_bytes += skb->len;
1253 /* populate checksum, VLAN, and protocol */
1254 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1255 ICE_RX_FLEX_DESC_PTYPE_M;
1257 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1259 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1260 /* send completed skb up the stack */
1261 ice_receive_skb(rx_ring, skb, vlan_tag);
1264 /* update budget accounting */
1268 /* return up to cleaned_count buffers to hardware */
1269 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1272 ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
1275 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1277 /* guarantee a trip back through this routine if there was a failure */
1278 return failure ? budget : (int)total_rx_pkts;
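/* Handing back the full budget when the refill failed tells the NAPI core
 * that work remains, so ice_napi_poll() will not complete the instance and
 * this routine is guaranteed another pass to retry ice_alloc_rx_bufs().
 */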
1281 static void __ice_update_sample(struct ice_q_vector *q_vector,
1282 struct ice_ring_container *rc,
1283 struct dim_sample *sample,
1286 u64 packets = 0, bytes = 0;
1289 struct ice_tx_ring *tx_ring;
1291 ice_for_each_tx_ring(tx_ring, *rc) {
1292 packets += tx_ring->stats.pkts;
1293 bytes += tx_ring->stats.bytes;
1296 struct ice_rx_ring *rx_ring;
1298 ice_for_each_rx_ring(rx_ring, *rc) {
1299 packets += rx_ring->stats.pkts;
1300 bytes += rx_ring->stats.bytes;
1304 dim_update_sample(q_vector->total_events, packets, bytes, sample);
1305 sample->comp_ctr = 0;
1307 /* if dim settings get stale, like when not updated for 1
1308 * second or longer, force it to start again. This addresses the
1309 * frequent case of an idle queue being switched to by the
1310 * scheduler. The 1,000 here means 1,000 milliseconds.
1312 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
1313 rc->dim.state = DIM_START_MEASURE;
1317 * ice_net_dim - Update net DIM algorithm
1318 * @q_vector: the vector associated with the interrupt
1320 * Create a DIM sample and notify net_dim() so that it can possibly decide
1321 * a new ITR value based on incoming packets, bytes, and interrupts.
1323 * This function is a no-op if the ring is not configured to dynamic ITR.
1325 static void ice_net_dim(struct ice_q_vector *q_vector)
1327 struct ice_ring_container *tx = &q_vector->tx;
1328 struct ice_ring_container *rx = &q_vector->rx;
1330 if (ITR_IS_DYNAMIC(tx)) {
1331 struct dim_sample dim_sample;
1333 __ice_update_sample(q_vector, tx, &dim_sample, true);
1334 net_dim(&tx->dim, dim_sample);
1337 if (ITR_IS_DYNAMIC(rx)) {
1338 struct dim_sample dim_sample;
1340 __ice_update_sample(q_vector, rx, &dim_sample, false);
1341 net_dim(&rx->dim, dim_sample);
1346 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1347 * @itr_idx: interrupt throttling index
1348 * @itr: interrupt throttling value in usecs
1350 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1352 /* The ITR value is reported in microseconds, and the register value is
1353 * recorded in 2 microsecond units. For this reason we only need to
1354 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1355 * granularity as a shift instead of division. The mask makes sure the
1356 * ITR value is never odd so we don't accidentally write into the field
1357 * prior to the ITR field.
1359 itr &= ICE_ITR_MASK;
1361 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1362 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1363 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
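/* Worked example, using the 2 microsecond granularity described above: an
 * ITR of 50 usec already has bit 0 cleared by ICE_ITR_MASK, and the shift by
 * (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S) lands 50 / 2 = 25 interval
 * units in the INTERVAL field, so no division is needed in the hot path.
 */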
1367 * ice_enable_interrupt - re-enable MSI-X interrupt
1368 * @q_vector: the vector associated with the interrupt to enable
1370 * If the VSI is down, the interrupt will not be re-enabled. Also,
1371 * when enabling the interrupt always reset the wb_on_itr to false
1372 * and trigger a software interrupt to clean out internal state.
1374 static void ice_enable_interrupt(struct ice_q_vector *q_vector)
1376 struct ice_vsi *vsi = q_vector->vsi;
1377 bool wb_en = q_vector->wb_on_itr;
1380 if (test_bit(ICE_DOWN, vsi->state))
1383 /* trigger an ITR delayed software interrupt when exiting busy poll, to
1384 * make sure to catch any pending cleanups that might have been missed
1385 * due to interrupt state transition. If busy poll or poll isn't
1386 * enabled, then don't update ITR, and just enable the interrupt.
1389 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1391 q_vector->wb_on_itr = false;
1393 /* do two things here with a single write. Set up the third ITR
1394 * index to be used for software interrupt moderation, and then
1395 * trigger a software interrupt with a rate limit of 20K on
1396 * software interrupts, this will help avoid high interrupt
1397 * loads due to frequently polling and exiting polling.
1399 itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
1400 itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1401 ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
1402 GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1404 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1408 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1409 * @q_vector: q_vector to set WB_ON_ITR on
1411 * We need to tell hardware to write-back completed descriptors even when
1412 * interrupts are disabled. Descriptors will be written back on cache line
1413 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1414 * descriptors may not be written back if they don't fill a cache line until
1415 * the next interrupt.
1417 * This sets the write-back frequency to whatever was set previously for the
1418 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1419 * aren't meddling with the INTENA_M bit.
1421 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1423 struct ice_vsi *vsi = q_vector->vsi;
1425 /* already in wb_on_itr mode no need to change it */
1426 if (q_vector->wb_on_itr)
1429 /* use previously set ITR values for all of the ITR indices by
1430 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1431 * be static in non-adaptive mode (user configured)
1433 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1434 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1435 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1436 GLINT_DYN_CTL_WB_ON_ITR_M);
1438 q_vector->wb_on_itr = true;
1442 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1443 * @napi: napi struct with our devices info in it
1444 * @budget: amount of work driver is allowed to do this pass, in packets
1446 * This function will clean all queues associated with a q_vector.
1448 * Returns the amount of work done
1450 int ice_napi_poll(struct napi_struct *napi, int budget)
1452 struct ice_q_vector *q_vector =
1453 container_of(napi, struct ice_q_vector, napi);
1454 struct ice_tx_ring *tx_ring;
1455 struct ice_rx_ring *rx_ring;
1456 bool clean_complete = true;
1457 int budget_per_ring;
1460 /* Since the actual Tx work is minimal, we can give the Tx a larger
1461 * budget and be more aggressive about cleaning up the Tx descriptors.
1463 ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1466 if (tx_ring->xsk_pool)
1467 wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
1468 else if (ice_ring_is_xdp(tx_ring))
1471 wd = ice_clean_tx_irq(tx_ring, budget);
1474 clean_complete = false;
1477 /* Handle case where we are called by netpoll with a budget of 0 */
1478 if (unlikely(budget <= 0))
1481 /* normally we have 1 Rx ring per q_vector */
1482 if (unlikely(q_vector->num_ring_rx > 1))
1483 /* We attempt to distribute budget to each Rx queue fairly, but
1484  * don't allow the budget to go below 1 because that would exit polling early.
1487 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1489 /* Max of 1 Rx ring in this q_vector so give it the budget */
1490 budget_per_ring = budget;
1492 ice_for_each_rx_ring(rx_ring, q_vector->rx) {
1495 /* A dedicated path for zero-copy allows making a single
1496 * comparison in the irq context instead of many inside the
1497 * ice_clean_rx_irq function and makes the codebase cleaner.
1499 cleaned = rx_ring->xsk_pool ?
1500 ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
1501 ice_clean_rx_irq(rx_ring, budget_per_ring);
1502 work_done += cleaned;
1503 /* if we clean as many as budgeted, we must not be done */
1504 if (cleaned >= budget_per_ring)
1505 clean_complete = false;
1508 /* If work not completed, return budget and polling will return */
1509 if (!clean_complete) {
1510 /* Set the writeback on ITR so partial completions of
1511 * cache-lines will still continue even if we're polling.
1513 ice_set_wb_on_itr(q_vector);
1517 /* Exit the polling mode, but don't re-enable interrupts if stack might
1518 * poll us due to busy-polling
1520 if (napi_complete_done(napi, work_done)) {
1521 ice_net_dim(q_vector);
1522 ice_enable_interrupt(q_vector);
1524 ice_set_wb_on_itr(q_vector);
1527 return min_t(int, work_done, budget - 1);
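/* Capping the return value at budget - 1 matters because
 * napi_complete_done() may already have been called above; returning the
 * full budget would tell the NAPI core that this instance still has work
 * pending even though it was just completed.
 */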
1531 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1532 * @tx_ring: the ring to be checked
1533 * @size: the size buffer we want to assure is available
1535 * Returns -EBUSY if a stop is needed, else 0
1537 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1539 netif_tx_stop_queue(txring_txq(tx_ring));
1540 /* Memory barrier before checking head and tail */
1543 /* Check again in a case another CPU has just made room available. */
1544 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1547 /* A reprieve! - use start_queue because it doesn't call schedule */
1548 netif_tx_start_queue(txring_txq(tx_ring));
1549 ++tx_ring->tx_stats.restart_q;
1554 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1555 * @tx_ring: the ring to be checked
1556 * @size: the size buffer we want to assure is available
1558 * Returns 0 if stop is not needed
1560 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1562 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1565 return __ice_maybe_stop_tx(tx_ring, size);
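/* The two-level split keeps the common case cheap: the inline check above
 * avoids stopping the queue at all while space is plentiful, and the slow
 * path in __ice_maybe_stop_tx() stops the queue first, issues a barrier and
 * re-checks, so a completion racing with the stop either sees the stopped
 * queue and wakes it via the TX_WAKE_THRESHOLD test in ice_clean_tx_irq() or
 * the re-check restarts it; either way the queue cannot stall forever.
 */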
1569 * ice_tx_map - Build the Tx descriptor
1570 * @tx_ring: ring to send buffer on
1571 * @first: first buffer info buffer to use
1572 * @off: pointer to struct that holds offload parameters
1574 * This function loops over the skb data pointed to by *first
1575 * and gets a physical address for each memory location and programs
1576 * it and the length into the transmit descriptor.
1579 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1580 struct ice_tx_offload_params *off)
1582 u64 td_offset, td_tag, td_cmd;
1583 u16 i = tx_ring->next_to_use;
1584 unsigned int data_len, size;
1585 struct ice_tx_desc *tx_desc;
1586 struct ice_tx_buf *tx_buf;
1587 struct sk_buff *skb;
1592 td_tag = off->td_l2tag1;
1593 td_cmd = off->td_cmd;
1594 td_offset = off->td_offset;
1597 data_len = skb->data_len;
1598 size = skb_headlen(skb);
1600 tx_desc = ICE_TX_DESC(tx_ring, i);
1602 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1603 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1604 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1605 ICE_TX_FLAGS_VLAN_S;
1608 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1612 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1613 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1615 if (dma_mapping_error(tx_ring->dev, dma))
1618 /* record length, and DMA address */
1619 dma_unmap_len_set(tx_buf, len, size);
1620 dma_unmap_addr_set(tx_buf, dma, dma);
1622 /* align size to end of page */
1623 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1624 tx_desc->buf_addr = cpu_to_le64(dma);
1626 /* account for data chunks larger than the hardware can handle
1629 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1630 tx_desc->cmd_type_offset_bsz =
1631 ice_build_ctob(td_cmd, td_offset, max_data,
1637 if (i == tx_ring->count) {
1638 tx_desc = ICE_TX_DESC(tx_ring, 0);
1645 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1646 tx_desc->buf_addr = cpu_to_le64(dma);
1649 if (likely(!data_len))
1652 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1658 if (i == tx_ring->count) {
1659 tx_desc = ICE_TX_DESC(tx_ring, 0);
1663 size = skb_frag_size(frag);
1666 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1669 tx_buf = &tx_ring->tx_buf[i];
1672 /* record SW timestamp if HW timestamp is not available */
1673 skb_tx_timestamp(first->skb);
1676 if (i == tx_ring->count)
1679 /* write last descriptor with RS and EOP bits */
1680 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1681 tx_desc->cmd_type_offset_bsz =
1682 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1684 /* Force memory writes to complete before letting h/w know there
1685 * are new descriptors to fetch.
1687 * We also use this memory barrier to make certain all of the
1688 * status bits have been updated before next_to_watch is written.
1692 /* set next_to_watch value indicating a packet is present */
1693 first->next_to_watch = tx_desc;
1695 tx_ring->next_to_use = i;
1697 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1699 /* notify HW of packet */
1700 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
1701 netdev_xmit_more());
1703 /* notify HW of packet */
1704 writel(i, tx_ring->tail);
1709 /* clear DMA mappings for failed tx_buf map */
1711 tx_buf = &tx_ring->tx_buf[i];
1712 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1713 if (tx_buf == first)
1720 tx_ring->next_to_use = i;
1724 * ice_tx_csum - Enable Tx checksum offloads
1725 * @first: pointer to the first descriptor
1726 * @off: pointer to struct that holds offload parameters
1728 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1731 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1733 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1734 struct sk_buff *skb = first->skb;
1744 __be16 frag_off, protocol;
1745 unsigned char *exthdr;
1746 u32 offset, cmd = 0;
1749 if (skb->ip_summed != CHECKSUM_PARTIAL)
1752 protocol = vlan_get_protocol(skb);
1754 if (eth_p_mpls(protocol)) {
1755 ip.hdr = skb_inner_network_header(skb);
1756 l4.hdr = skb_checksum_start(skb);
1758 ip.hdr = skb_network_header(skb);
1759 l4.hdr = skb_transport_header(skb);
1762 /* compute outer L2 header size */
1763 l2_len = ip.hdr - skb->data;
1764 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1766 /* set the tx_flags to indicate the IP protocol type. this is
1767 * required so that checksum header computation below is accurate.
1769 if (ip.v4->version == 4)
1770 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1771 else if (ip.v6->version == 6)
1772 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1774 if (skb->encapsulation) {
1775 bool gso_ena = false;
1778 /* define outer network header type */
1779 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1780 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1781 ICE_TX_CTX_EIPT_IPV4 :
1782 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1783 l4_proto = ip.v4->protocol;
1784 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1787 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1788 exthdr = ip.hdr + sizeof(*ip.v6);
1789 l4_proto = ip.v6->nexthdr;
1790 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1791 &l4_proto, &frag_off);
1796 /* define outer transport */
1799 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1800 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1803 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1804 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1808 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1809 l4.hdr = skb_inner_network_header(skb);
1812 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1815 skb_checksum_help(skb);
1819 /* compute outer L3 header size */
1820 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1821 ICE_TXD_CTX_QW0_EIPLEN_S;
1823 /* switch IP header pointer from outer to inner header */
1824 ip.hdr = skb_inner_network_header(skb);
1826 /* compute tunnel header size */
1827 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1828 ICE_TXD_CTX_QW0_NATLEN_S;
1830 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1831 /* indicate if we need to offload outer UDP header */
1832 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1833 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1834 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1836 /* record tunnel offload values */
1837 off->cd_tunnel_params |= tunnel;
1839 /* set DTYP=1 to indicate that it's a Tx context descriptor
1840 * in IPsec tunnel mode with Tx offloads in Quad word 1
1842 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1844 /* switch L4 header pointer from outer to inner */
1845 l4.hdr = skb_inner_transport_header(skb);
1848 /* reset type as we transition from outer to inner headers */
1849 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1850 if (ip.v4->version == 4)
1851 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1852 if (ip.v6->version == 6)
1853 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1856 /* Enable IP checksum offloads */
1857 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1858 l4_proto = ip.v4->protocol;
1859 /* the stack computes the IP header already, the only time we
1860 * need the hardware to recompute it is in the case of TSO.
1862 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1863 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1865 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1867 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1868 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1869 exthdr = ip.hdr + sizeof(*ip.v6);
1870 l4_proto = ip.v6->nexthdr;
1871 if (l4.hdr != exthdr)
1872 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1878 /* compute inner L3 header size */
1879 l3_len = l4.hdr - ip.hdr;
1880 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1882 /* Enable L4 checksum offloads */
1885 /* enable checksum offloads */
1886 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1887 l4_len = l4.tcp->doff;
1888 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1891 /* enable UDP checksum offload */
1892 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1893 l4_len = (sizeof(struct udphdr) >> 2);
1894 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1897 /* enable SCTP checksum offload */
1898 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1899 l4_len = sizeof(struct sctphdr) >> 2;
1900 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1904 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1906 skb_checksum_help(skb);
1911 off->td_offset |= offset;
1916 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1917 * @tx_ring: ring to send buffer on
1918 * @first: pointer to struct ice_tx_buf
1920  * Checks the skb and sets up the corresponding generic transmit flags
1921  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1924 ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
1926 struct sk_buff *skb = first->skb;
1928 /* nothing left to do, software offloaded VLAN */
1929 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1932  * the VLAN ethertype/tpid is determined by VSI configuration and netdev
1933  * feature flags; the driver only allows either 802.1Q or 802.1ad VLAN
1934  * offloads at a time, so we only care about the VLAN ID here
1936 if (skb_vlan_tag_present(skb)) {
1937 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1938 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
1939 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1941 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1944 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1948 * ice_tso - computes mss and TSO length to prepare for TSO
1949 * @first: pointer to struct ice_tx_buf
1950 * @off: pointer to struct that holds offload parameters
1952 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1955 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1957 struct sk_buff *skb = first->skb;
1968 u64 cd_mss, cd_tso_len;
1974 if (skb->ip_summed != CHECKSUM_PARTIAL)
1977 if (!skb_is_gso(skb))
1980 err = skb_cow_head(skb, 0);
1984 /* cppcheck-suppress unreadVariable */
1985 protocol = vlan_get_protocol(skb);
1987 if (eth_p_mpls(protocol))
1988 ip.hdr = skb_inner_network_header(skb);
1990 ip.hdr = skb_network_header(skb);
1991 l4.hdr = skb_checksum_start(skb);
1993 /* initialize outer IP header fields */
1994 if (ip.v4->version == 4) {
1998 ip.v6->payload_len = 0;
2001 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2005 SKB_GSO_UDP_TUNNEL |
2006 SKB_GSO_UDP_TUNNEL_CSUM)) {
2007 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2008 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2011 /* determine offset of outer transport header */
2012 l4_start = (u8)(l4.hdr - skb->data);
2014 /* remove payload length from outer checksum */
2015 paylen = skb->len - l4_start;
2016 csum_replace_by_diff(&l4.udp->check,
2017 (__force __wsum)htonl(paylen));
2020 /* reset pointers to inner headers */
2022 /* cppcheck-suppress unreadVariable */
2023 ip.hdr = skb_inner_network_header(skb);
2024 l4.hdr = skb_inner_transport_header(skb);
2026 /* initialize inner IP header fields */
2027 if (ip.v4->version == 4) {
2031 ip.v6->payload_len = 0;
2035 /* determine offset of transport header */
2036 l4_start = (u8)(l4.hdr - skb->data);
2038 /* remove payload length from checksum */
2039 paylen = skb->len - l4_start;
2041 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2042 csum_replace_by_diff(&l4.udp->check,
2043 (__force __wsum)htonl(paylen));
2044 /* compute length of UDP segmentation header */
2045 off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2047 csum_replace_by_diff(&l4.tcp->check,
2048 (__force __wsum)htonl(paylen));
2049 /* compute length of TCP segmentation header */
2050 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
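/* The pseudo-header checksum prepared by the stack includes the total
 * payload length, but the hardware rebuilds that checksum per segment from
 * each segment's own length, so csum_replace_by_diff() above folds the full
 * payload length back out before the headers are replicated.
 */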
2053 /* update gso_segs and bytecount */
2054 first->gso_segs = skb_shinfo(skb)->gso_segs;
2055 first->bytecount += (first->gso_segs - 1) * off->header_len;
2057 cd_tso_len = skb->len - off->header_len;
2058 cd_mss = skb_shinfo(skb)->gso_size;
2060 /* record cdesc_qw1 with TSO parameters */
2061 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2062 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2063 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2064 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2065 first->tx_flags |= ICE_TX_FLAGS_TSO;
2070 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2071 * @size: transmit request size in bytes
2073 * Due to hardware alignment restrictions (4K alignment), we need to
2074 * assume that we can have no more than 12K of data per descriptor, even
2075 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2076 * Thus, we need to divide by 12K. But division is slow! Instead,
2077 * we decompose the operation into shifts and one relatively cheap
2078 * multiply operation.
2080 * To divide by 12K, we first divide by 4K, then divide by 3:
2081 * To divide by 4K, shift right by 12 bits
2082 * To divide by 3, multiply by 85, then divide by 256
2083 * (Divide by 256 is done by shifting right by 8 bits)
2084 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2085 * 3, we'll underestimate near each multiple of 12K. This is actually more
2086 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2087 * segment. For our purposes this is accurate out to 1M which is orders of
2088 * magnitude greater than our largest possible GSO size.
2090 * This would then be implemented as:
2091 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2093 * Since multiplication and division are commutative, we can reorder
2095 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2097 static unsigned int ice_txd_use_count(unsigned int size)
2099 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
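/* Worked example, taking ICE_DESCS_FOR_SKB_DATA_PTR as the "+1" described
 * above: a 32 KB chunk gives (32768 * 85) >> 20 = 2, plus 1 = 3 descriptors,
 * matching the three 12 KB-aligned slices such a buffer actually needs.
 */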
2103 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2106 * Returns number of data descriptors needed for this skb.
2108 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2110 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2111 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2112 unsigned int count = 0, size = skb_headlen(skb);
2115 count += ice_txd_use_count(size);
2120 size = skb_frag_size(frag++);

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
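
/* Illustration of the check above (example values, not from the driver):
 * with gso_size = 9000, a TSO skb built from 1400 byte fragments is
 * linearized because any six consecutive fragments cover only 8400 bytes,
 * so the running sum goes negative; 1500 byte fragments pass because six
 * of them cover exactly one 9000 byte segment.
 */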

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}
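
/* In other words (illustrative): a non-GSO send that needs exactly
 * ICE_MAX_BUF_TXD descriptors is transmitted as-is, while one that needs
 * nine or more is linearized before mapping.
 */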

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0)
		return;
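
	/* The slot index is carried to hardware in the TSO length field of
	 * the context descriptor's QW1, alongside the TSYN command bit set
	 * below.
	 */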
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}
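
	/* For example (illustrative numbers): a linear 1500 byte frame needs
	 * a single data descriptor, so the check above requires
	 * 1 + ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_CTX_DESC = 6 unused
	 * descriptors before the frame is queued.
	 */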

	/* prefetch for bql data which is infrequently used */
	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);
	if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					(ICE_TX_CTX_DESC_IL2TAG2 <<
					ICE_TXD_CTX_QW1_CMD_S));
		offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			ICE_TX_FLAGS_VLAN_S;
	}

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);
	if (ice_is_switchdev_running(vsi->back))
		ice_eswitch_set_target_vsi(skb, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function is to only be called when the PF is in L3 DSCP PFC mode
 */
static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
{
	u8 dscp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return dcbcfg->dscp_map[dscp];
}
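
/* Example (illustrative): an IPv4 packet whose DS field is 0xb8 carries
 * DSCP 46 (0xb8 >> 2, Expedited Forwarding) and is mapped to whatever
 * UP/TC the DCB configuration stores in dscp_map[46].
 */

/**
 * ice_select_queue - Select the right Tx queue for an skb
 * @netdev: network interface device structure
 * @skb: send buffer
 * @sb_dev: subordinate device, if any
 *
 * When the PF is in DSCP PFC mode, derive the skb priority from the DSCP
 * field first so the stack picks a queue from the matching traffic class.
 */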
u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
		 struct net_device *sb_dev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *dcbcfg;

	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
		skb->priority = ice_get_dscp_up(dcbcfg, skb);

	return netdev_pick_tx(netdev, skb, sb_dev);
}

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
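
	/* i is kept as a negative offset from the end of the ring so that
	 * the wrap back to descriptor 0 can be detected with a cheap !i test
	 * after each increment; it is converted back to a normal index
	 * before next_to_clean is written at the end of this routine.
	 */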

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* clear next_to_watch to prevent false hangs */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}