// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c  Time Aware Priority Scheduler
 *
 * Authors:     Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>
static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
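
/* Illustrative only (not part of this file): a taprio schedule with three
 * traffic classes and three gate entries is typically installed with a
 * command along these lines; all values below are placeholders:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1528743495910289987 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 */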
struct sched_entry {
        struct list_head list;

        /* The instant that this entry "closes" and the next one
         * should open, the qdisc will make some effort so that no
         * packet leaves after this time.
         */
        ktime_t close_time;
        ktime_t next_txtime;
        atomic_t budget;
        int index;
        u32 gate_mask;
        u32 interval;
        u8 command;
};
struct sched_gate_list {
        struct rcu_head rcu;
        struct list_head entries;
        size_t num_entries;
        ktime_t cycle_close_time;
        s64 cycle_time;
        s64 cycle_time_extension;
        s64 base_time;
};
struct taprio_sched {
        struct Qdisc **qdiscs;
        struct Qdisc *root;
        u32 flags;
        enum tk_offsets tk_offset;
        int clockid;
        bool offloaded;
        atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
                                    * speeds it's sub-nanoseconds per byte
                                    */

        /* Protects the update side of the RCU protected current_entry */
        spinlock_t current_entry_lock;
        struct sched_entry __rcu *current_entry;
        struct sched_gate_list __rcu *oper_sched;
        struct sched_gate_list __rcu *admin_sched;
        struct hrtimer advance_timer;
        struct list_head taprio_list;
        struct sk_buff *(*dequeue)(struct Qdisc *sch);
        struct sk_buff *(*peek)(struct Qdisc *sch);
        u32 txtime_delay;
};
struct __tc_taprio_qopt_offload {
        refcount_t users;
        struct tc_taprio_qopt_offload offload;
};
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
        if (!sched)
                return KTIME_MAX;

        return ns_to_ktime(sched->base_time);
}
static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
        /* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
        enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

        switch (tk_offset) {
        case TK_OFFS_MAX:
                return mono;
        default:
                return ktime_mono_to_any(mono, tk_offset);
        }
}
static ktime_t taprio_get_time(const struct taprio_sched *q)
{
        return taprio_mono_to_any(q, ktime_get());
}
static void taprio_free_sched_cb(struct rcu_head *head)
{
        struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
        struct sched_entry *entry, *n;

        list_for_each_entry_safe(entry, n, &sched->entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        kfree(sched);
}
static void switch_schedules(struct taprio_sched *q,
                             struct sched_gate_list **admin,
                             struct sched_gate_list **oper)
{
        rcu_assign_pointer(q->oper_sched, *admin);
        rcu_assign_pointer(q->admin_sched, NULL);

        if (*oper)
                call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

        *oper = *admin;
        *admin = NULL;
}
/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
        ktime_t time_since_sched_start;
        s32 time_elapsed;

        time_since_sched_start = ktime_sub(time, sched->base_time);
        div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

        return time_elapsed;
}
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
                                     struct sched_gate_list *admin,
                                     struct sched_entry *entry,
                                     ktime_t intv_start)
{
        s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
        ktime_t intv_end, cycle_ext_end, cycle_end;

        cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
        intv_end = ktime_add_ns(intv_start, entry->interval);
        cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

        if (ktime_before(intv_end, cycle_end))
                return intv_end;
        else if (admin && admin != sched &&
                 ktime_after(admin->base_time, cycle_end) &&
                 ktime_before(admin->base_time, cycle_ext_end))
                return admin->base_time;

        return cycle_end;
}
static int length_to_duration(struct taprio_sched *q, int len)
{
        return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}
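
/* Worked example for length_to_duration() above: at 1Gbps, picos_per_byte
 * is 8000 (see taprio_set_picos_per_byte()), so a 1500 byte frame maps to
 * 1500 * 8000 / 1000 = 12000ns of wire time.
 */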
/* Returns the entry corresponding to next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
                                                  struct Qdisc *sch,
                                                  struct sched_gate_list *sched,
                                                  struct sched_gate_list *admin,
                                                  ktime_t time,
                                                  ktime_t *interval_start,
                                                  ktime_t *interval_end,
                                                  bool validate_interval)
{
        ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
        ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
        struct sched_entry *entry = NULL, *entry_found = NULL;
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        bool entry_available = false;
        s32 cycle_elapsed;
        int tc, n;

        tc = netdev_get_prio_tc_map(dev, skb->priority);
        packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

        *interval_start = 0;
        *interval_end = 0;

        if (!sched)
                return NULL;

        cycle = sched->cycle_time;
        cycle_elapsed = get_cycle_time_elapsed(sched, time);
        curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
        cycle_end = ktime_add_ns(curr_intv_end, cycle);

        list_for_each_entry(entry, &sched->entries, list) {
                curr_intv_start = curr_intv_end;
                curr_intv_end = get_interval_end_time(sched, admin, entry,
                                                      curr_intv_start);

                if (ktime_after(curr_intv_start, cycle_end))
                        break;

                if (!(entry->gate_mask & BIT(tc)) ||
                    packet_transmit_time > entry->interval)
                        continue;

                txtime = entry->next_txtime;

                if (ktime_before(txtime, time) || validate_interval) {
                        transmit_end_time = ktime_add_ns(time, packet_transmit_time);
                        if ((ktime_before(curr_intv_start, time) &&
                             ktime_before(transmit_end_time, curr_intv_end)) ||
                            (ktime_after(curr_intv_start, time) && !validate_interval)) {
                                entry_found = entry;
                                *interval_start = curr_intv_start;
                                *interval_end = curr_intv_end;
                                break;
                        } else if (!entry_available && !validate_interval) {
                                /* Here, we are just trying to find out the
                                 * first available interval in the next cycle.
                                 */
                                entry_available = true;
                                entry_found = entry;
                                *interval_start = ktime_add_ns(curr_intv_start, cycle);
                                *interval_end = ktime_add_ns(curr_intv_end, cycle);
                        }
                } else if (ktime_before(txtime, earliest_txtime) &&
                           !entry_available) {
                        earliest_txtime = txtime;
                        entry_found = entry;
                        n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
                        *interval_start = ktime_add(curr_intv_start, n * cycle);
                        *interval_end = ktime_add(curr_intv_end, n * cycle);
                }
        }

        return entry_found;
}
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct sched_gate_list *sched, *admin;
        ktime_t interval_start, interval_end;
        struct sched_entry *entry;

        rcu_read_lock();
        sched = rcu_dereference(q->oper_sched);
        admin = rcu_dereference(q->admin_sched);

        entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
                                       &interval_start, &interval_end, true);
        rcu_read_unlock();

        return entry;
}
static bool taprio_flags_valid(u32 flags)
{
        /* Make sure no other flag bits are set. */
        if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
                      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
                return false;

        /* txtime-assist and full offload are mutually exclusive */
        if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
            (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
                return false;

        return true;
}
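
/* For reference: TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST is BIT(0) and
 * TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD is BIT(1), so "flags 0x1" selects
 * txtime-assist mode, "flags 0x2" full offload, and 0x3 is rejected
 * above because the two modes are mutually exclusive.
 */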
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
        unsigned int offset = skb_network_offset(skb);
        const struct ipv6hdr *ipv6h;
        const struct iphdr *iph;
        struct ipv6hdr _ipv6h;

        ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
        if (!ipv6h)
                return 0;

        if (ipv6h->version == 4) {
                iph = (struct iphdr *)ipv6h;
                offset += iph->ihl * 4;

                /* special-case 6in4 tunnelling, as that is a common way to get
                 * v6 connectivity in the home
                 */
                if (iph->protocol == IPPROTO_IPV6) {
                        ipv6h = skb_header_pointer(skb, offset,
                                                   sizeof(_ipv6h), &_ipv6h);

                        if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
                                return 0;
                } else if (iph->protocol != IPPROTO_TCP) {
                        return 0;
                }
        } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
                return 0;
        }

        return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}
/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
        ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
        struct taprio_sched *q = qdisc_priv(sch);
        struct sched_gate_list *sched, *admin;
        ktime_t minimum_time, now, txtime;
        int len, packet_transmit_time;
        struct sched_entry *entry;
        bool sched_changed;

        now = taprio_get_time(q);
        minimum_time = ktime_add_ns(now, q->txtime_delay);

        tcp_tstamp = get_tcp_tstamp(q, skb);
        minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

        rcu_read_lock();
        admin = rcu_dereference(q->admin_sched);
        sched = rcu_dereference(q->oper_sched);
        if (admin && ktime_after(minimum_time, admin->base_time))
                switch_schedules(q, &admin, &sched);

        /* Until the schedule starts, all the queues are open */
        if (!sched || ktime_before(minimum_time, sched->base_time)) {
                txtime = minimum_time;
                goto done;
        }

        len = qdisc_pkt_len(skb);
        packet_transmit_time = length_to_duration(q, len);

        do {
                sched_changed = false;

                entry = find_entry_to_transmit(skb, sch, sched, admin,
                                               minimum_time,
                                               &interval_start, &interval_end,
                                               false);
                if (!entry) {
                        txtime = 0;
                        goto done;
                }

                txtime = entry->next_txtime;
                txtime = max_t(ktime_t, txtime, minimum_time);
                txtime = max_t(ktime_t, txtime, interval_start);

                if (admin && admin != sched &&
                    ktime_after(txtime, admin->base_time)) {
                        sched = admin;
                        sched_changed = true;
                        continue;
                }

                transmit_end_time = ktime_add(txtime, packet_transmit_time);
                minimum_time = transmit_end_time;

                /* Update the txtime of current entry to the next time it's
                 * interval starts.
                 */
                if (ktime_after(transmit_end_time, interval_end))
                        entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
        } while (sched_changed || ktime_after(transmit_end_time, interval_end));

        entry->next_txtime = transmit_end_time;

done:
        rcu_read_unlock();
        return txtime;
}
static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
                              struct Qdisc *child, struct sk_buff **to_free)
{
        struct taprio_sched *q = qdisc_priv(sch);

        /* sk_flags are only safe to use on full sockets. */
        if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
                if (!is_valid_interval(skb, sch))
                        return qdisc_drop(skb, sch, to_free);
        } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                skb->tstamp = get_packet_txtime(skb, sch);
                if (!skb->tstamp)
                        return qdisc_drop(skb, sch, to_free);
        }

        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;

        return qdisc_enqueue(skb, child, to_free);
}
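
/* A sketch of how an application opts into the is_valid_interval() check
 * above: it sets SO_TXTIME on the socket and attaches a transmit time to
 * each packet. This is illustrative user space code, not part of taprio:
 *
 *   struct sock_txtime so_txtime = {
 *           .clockid = CLOCK_TAI,
 *   };
 *   setsockopt(fd, SOL_SOCKET, SO_TXTIME, &so_txtime, sizeof(so_txtime));
 *
 * after which the desired txtime is passed per-packet via the SCM_TXTIME
 * control message and lands in skb->tstamp.
 */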
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct Qdisc *child;
        int queue;

        if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) {
                WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n");
                return qdisc_drop(skb, sch, to_free);
        }

        queue = skb_get_queue_mapping(skb);

        child = q->qdiscs[queue];
        if (unlikely(!child))
                return qdisc_drop(skb, sch, to_free);

        /* Large packets might not be transmitted when the transmission duration
         * exceeds any configured interval. Therefore, segment the skb into
         * smaller chunks. Skip it for the full offload case, as the driver
         * and/or the hardware is expected to handle this.
         */
        if (skb_is_gso(skb) && !FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
                netdev_features_t features = netif_skb_features(skb);
                struct sk_buff *segs, *nskb;
                int ret;

                segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
                if (IS_ERR_OR_NULL(segs))
                        return qdisc_drop(skb, sch, to_free);

                skb_list_walk_safe(segs, segs, nskb) {
                        skb_mark_not_on_list(segs);
                        qdisc_skb_cb(segs)->pkt_len = segs->len;
                        slen += segs->len;

                        ret = taprio_enqueue_one(segs, sch, child, to_free);
                        if (ret != NET_XMIT_SUCCESS) {
                                if (net_xmit_drop_count(ret))
                                        qdisc_qstats_drop(sch);
                        } else {
                                numsegs++;
                        }
                }

                if (numsegs > 1)
                        qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
                consume_skb(skb);

                return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
        }

        return taprio_enqueue_one(skb, sch, child, to_free);
}
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sched_entry *entry;
        struct sk_buff *skb;
        u32 gate_mask;
        int i;

        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
        rcu_read_unlock();

        if (!gate_mask)
                return NULL;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct Qdisc *child = q->qdiscs[i];
                int prio;
                u8 tc;

                if (unlikely(!child))
                        continue;

                skb = child->ops->peek(child);
                if (!skb)
                        continue;

                if (TXTIME_ASSIST_IS_ENABLED(q->flags))
                        return skb;

                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);

                if (!(gate_mask & BIT(tc)))
                        continue;

                return skb;
        }

        return NULL;
}
static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
        WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n");

        return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);

        return q->peek(sch);
}
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
        atomic_set(&entry->budget,
                   div64_u64((u64)entry->interval * PSEC_PER_NSEC,
                             atomic64_read(&q->picos_per_byte)));
}
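
/* Example for taprio_set_budget(): a 300000ns entry interval at 1Gbps
 * (8000 picoseconds per byte) gives a budget of
 * 300000 * 1000 / 8000 = 37500 bytes for that gate opening.
 */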
static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sk_buff *skb = NULL;
        struct sched_entry *entry;
        u32 gate_mask;
        int i;

        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        /* if there's no entry, it means that the schedule didn't
         * start yet, so force all gates to be open, this is in
         * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
         * "AdminGateStates"
         */
        gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

        if (!gate_mask)
                goto done;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct Qdisc *child = q->qdiscs[i];
                ktime_t guard;
                int prio;
                int len;
                u8 tc;

                if (unlikely(!child))
                        continue;

                if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                        skb = child->ops->dequeue(child);
                        if (!skb)
                                continue;
                        goto skb_found;
                }

                skb = child->ops->peek(child);
                if (!skb)
                        continue;

                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);

                if (!(gate_mask & BIT(tc))) {
                        skb = NULL;
                        continue;
                }

                len = qdisc_pkt_len(skb);
                guard = ktime_add_ns(taprio_get_time(q),
                                     length_to_duration(q, len));

                /* In the case that there's no gate entry, there's no
                 * guard band ...
                 */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
                    ktime_after(guard, entry->close_time)) {
                        skb = NULL;
                        continue;
                }

                /* ... and no budget. */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
                    atomic_sub_return(len, &entry->budget) < 0) {
                        skb = NULL;
                        continue;
                }

                skb = child->ops->dequeue(child);
                if (unlikely(!skb))
                        goto done;

skb_found:
                qdisc_bstats_update(sch, skb);
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;

                goto done;
        }

done:
        rcu_read_unlock();

        return skb;
}
static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
        WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n");

        return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);

        return q->dequeue(sch);
}
static bool should_restart_cycle(const struct sched_gate_list *oper,
                                 const struct sched_entry *entry)
{
        if (list_is_last(&entry->list, &oper->entries))
                return true;

        if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
                return true;

        return false;
}
static bool should_change_schedules(const struct sched_gate_list *admin,
                                    const struct sched_gate_list *oper,
                                    ktime_t close_time)
{
        ktime_t next_base_time, extension_time;

        if (!admin)
                return false;

        next_base_time = sched_base_time(admin);

        /* This is the simple case, the close_time would fall after
         * the next schedule base_time.
         */
        if (ktime_compare(next_base_time, close_time) <= 0)
                return true;

        /* This is the cycle_time_extension case, if the close_time
         * plus the amount that can be extended would fall after the
         * next schedule base_time, we can extend the current schedule
         * for that amount.
         */
        extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

        /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
         * how precisely the extension should be made. So after
         * conformance testing, this logic may change.
         */
        if (ktime_compare(next_base_time, extension_time) <= 0)
                return true;

        return false;
}
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
        struct taprio_sched *q = container_of(timer, struct taprio_sched,
                                              advance_timer);
        struct sched_gate_list *oper, *admin;
        struct sched_entry *entry, *next;
        struct Qdisc *sch = q->root;
        ktime_t close_time;

        spin_lock(&q->current_entry_lock);
        entry = rcu_dereference_protected(q->current_entry,
                                          lockdep_is_held(&q->current_entry_lock));
        oper = rcu_dereference_protected(q->oper_sched,
                                         lockdep_is_held(&q->current_entry_lock));
        admin = rcu_dereference_protected(q->admin_sched,
                                          lockdep_is_held(&q->current_entry_lock));

        if (!oper)
                switch_schedules(q, &admin, &oper);

        /* This can happen in two cases: 1. this is the very first run
         * of this function (i.e. we weren't running any schedule
         * previously); 2. The previous schedule just ended. The first
         * entry of all schedules are pre-calculated during the
         * schedule initialization.
         */
        if (unlikely(!entry || entry->close_time == oper->base_time)) {
                next = list_first_entry(&oper->entries, struct sched_entry,
                                        list);
                close_time = next->close_time;
                goto first_run;
        }

        if (should_restart_cycle(oper, entry)) {
                next = list_first_entry(&oper->entries, struct sched_entry,
                                        list);
                oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
                                                      oper->cycle_time);
        } else {
                next = list_next_entry(entry, list);
        }

        close_time = ktime_add_ns(entry->close_time, next->interval);
        close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

        if (should_change_schedules(admin, oper, close_time)) {
                /* Set things so the next time this runs, the new
                 * schedule runs.
                 */
                close_time = sched_base_time(admin);
                switch_schedules(q, &admin, &oper);
        }

        next->close_time = close_time;
        taprio_set_budget(q, next);

first_run:
        rcu_assign_pointer(q->current_entry, next);
        spin_unlock(&q->current_entry_lock);

        hrtimer_set_expires(&q->advance_timer, close_time);

        rcu_read_lock();
        __netif_schedule(sch);
        rcu_read_unlock();

        return HRTIMER_RESTART;
}
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
        [TCA_TAPRIO_SCHED_ENTRY_INDEX]     = { .type = NLA_U32 },
        [TCA_TAPRIO_SCHED_ENTRY_CMD]       = { .type = NLA_U8 },
        [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
        [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};
static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_PRIOMAP] = {
                .len = sizeof(struct tc_mqprio_qopt)
        },
        [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
        [TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
};
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
                            struct sched_entry *entry,
                            struct netlink_ext_ack *extack)
{
        int min_duration = length_to_duration(q, ETH_ZLEN);
        u32 interval = 0;

        if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
                entry->command = nla_get_u8(
                        tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

        if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
                entry->gate_mask = nla_get_u32(
                        tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

        if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
                interval = nla_get_u32(
                        tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

        /* The interval should allow at least the minimum ethernet
         * frame to go out.
         */
        if (interval < min_duration) {
                NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
                return -EINVAL;
        }

        entry->interval = interval;

        return 0;
}
static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
                             struct sched_entry *entry, int index,
                             struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
                                          entry_policy, NULL);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Could not parse nested entry");
                return -EINVAL;
        }

        entry->index = index;

        return fill_sched_entry(q, tb, entry, extack);
}
static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
                            struct sched_gate_list *sched,
                            struct netlink_ext_ack *extack)
{
        struct nlattr *n;
        int err, rem;
        int i = 0;

        if (!list)
                return -EINVAL;

        nla_for_each_nested(n, list, rem) {
                struct sched_entry *entry;

                if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
                        NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
                        continue;
                }

                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        NL_SET_ERR_MSG(extack, "Not enough memory for entry");
                        return -ENOMEM;
                }

                err = parse_sched_entry(q, n, entry, i, extack);
                if (err < 0) {
                        kfree(entry);
                        return err;
                }

                list_add_tail(&entry->list, &sched->entries);
                i++;
        }

        sched->num_entries = i;

        return i;
}
static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
                                 struct sched_gate_list *new,
                                 struct netlink_ext_ack *extack)
{
        int err = 0;

        if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
                NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
                return -ENOTSUPP;
        }

        if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
                new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
                new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
                new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
                err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
                                       new, extack);
        if (err < 0)
                return err;

        if (!new->cycle_time) {
                struct sched_entry *entry;
                ktime_t cycle = 0;

                list_for_each_entry(entry, &new->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);

                if (!cycle) {
                        NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
                        return -EINVAL;
                }

                new->cycle_time = cycle;
        }

        return 0;
}
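
/* Example: with no TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME attribute and three
 * entries of 300us, 300us and 400us, the loop above derives a cycle_time
 * of 1ms, the sum of the entry intervals.
 */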
static int taprio_parse_mqprio_opt(struct net_device *dev,
                                   struct tc_mqprio_qopt *qopt,
                                   struct netlink_ext_ack *extack,
                                   u32 taprio_flags)
{
        int i, j;

        if (!qopt && !dev->num_tc) {
                NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
                return -EINVAL;
        }

        /* If num_tc is already set, it means that the user already
         * configured the mqprio part
         */
        if (dev->num_tc)
                return 0;

        /* Verify num_tc is not out of max range */
        if (qopt->num_tc > TC_MAX_QUEUE) {
                NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
                return -EINVAL;
        }

        /* taprio imposes that traffic classes map 1:n to tx queues */
        if (qopt->num_tc > dev->num_tx_queues) {
                NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
                return -EINVAL;
        }

        /* Verify priority mapping uses valid tcs */
        for (i = 0; i <= TC_BITMASK; i++) {
                if (qopt->prio_tc_map[i] >= qopt->num_tc) {
                        NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
                        return -EINVAL;
                }
        }

        for (i = 0; i < qopt->num_tc; i++) {
                unsigned int last = qopt->offset[i] + qopt->count[i];

                /* Verify the queue count is in tx range; a count equal to
                 * real_num_tx_queues indicates the last queue is in use.
                 */
                if (qopt->offset[i] >= dev->num_tx_queues ||
                    !qopt->count[i] ||
                    last > dev->real_num_tx_queues) {
                        NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
                        return -EINVAL;
                }

                if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
                        continue;

                /* Verify that the offset and counts do not overlap */
                for (j = i + 1; j < qopt->num_tc; j++) {
                        if (last > qopt->offset[j]) {
                                NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
static int taprio_get_start_time(struct Qdisc *sch,
                                 struct sched_gate_list *sched,
                                 ktime_t *start)
{
        struct taprio_sched *q = qdisc_priv(sch);
        ktime_t now, base, cycle;
        s64 n;

        base = sched_base_time(sched);
        now = taprio_get_time(q);

        if (ktime_after(base, now)) {
                *start = base;
                return 0;
        }

        cycle = sched->cycle_time;

        /* The qdisc is expected to have at least one sched_entry. Moreover,
         * any entry must have 'interval' > 0. Thus if the cycle time is zero,
         * something went really wrong. In that case, we should warn about this
         * inconsistent state and return error.
         */
        if (WARN_ON(!cycle))
                return -EFAULT;

        /* Schedule the start time for the beginning of the next
         * cycle.
         */
        n = div64_s64(ktime_sub_ns(now, base), cycle);
        *start = ktime_add_ns(base, (n + 1) * cycle);
        return 0;
}
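
/* Example: if 'base' lies 2.5 cycles in the past, n computes to 2 and the
 * schedule is started at base + 3 * cycle, i.e. at the next full cycle
 * boundary after 'now'.
 */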
static void setup_first_close_time(struct taprio_sched *q,
                                   struct sched_gate_list *sched, ktime_t base)
{
        struct sched_entry *first;
        ktime_t cycle;

        first = list_first_entry(&sched->entries,
                                 struct sched_entry, list);

        cycle = sched->cycle_time;

        /* FIXME: find a better place to do this */
        sched->cycle_close_time = ktime_add_ns(base, cycle);

        first->close_time = ktime_add_ns(base, first->interval);
        taprio_set_budget(q, first);
        rcu_assign_pointer(q->current_entry, NULL);
}
static void taprio_start_sched(struct Qdisc *sch,
                               ktime_t start, struct sched_gate_list *new)
{
        struct taprio_sched *q = qdisc_priv(sch);
        ktime_t expires;

        if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                return;

        expires = hrtimer_get_expires(&q->advance_timer);
        if (expires == 0)
                expires = KTIME_MAX;

        /* If the new schedule starts before the next expiration, we
         * reprogram it to the earliest one, so we change the admin
         * schedule to the operational one at the right time.
         */
        start = min_t(ktime_t, start, expires);

        hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
static void taprio_set_picos_per_byte(struct net_device *dev,
                                      struct taprio_sched *q)
{
        struct ethtool_link_ksettings ecmd;
        int speed = SPEED_10;
        int picos_per_byte;
        int err;

        err = __ethtool_get_link_ksettings(dev, &ecmd);
        if (err < 0)
                goto skip;

        if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
                speed = ecmd.base.speed;

skip:
        picos_per_byte = (USEC_PER_SEC * 8) / speed;

        atomic64_set(&q->picos_per_byte, picos_per_byte);
        netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
                   dev->name, (long long)atomic64_read(&q->picos_per_byte),
                   ecmd.base.speed);
}
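
/* Example: for a 1000Mbit/s link, picos_per_byte computes to
 * (1000000 * 8) / 1000 = 8000, i.e. 8ns per byte, matching a 125MB/s
 * wire rate.
 */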
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
                               void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net_device *qdev;
        struct taprio_sched *q;
        bool found = false;

        ASSERT_RTNL();

        if (event != NETDEV_UP && event != NETDEV_CHANGE)
                return NOTIFY_DONE;

        spin_lock(&taprio_list_lock);
        list_for_each_entry(q, &taprio_list, taprio_list) {
                qdev = qdisc_dev(q->root);
                if (qdev == dev) {
                        found = true;
                        break;
                }
        }
        spin_unlock(&taprio_list_lock);

        if (found)
                taprio_set_picos_per_byte(dev, q);

        return NOTIFY_DONE;
}
static void setup_txtime(struct taprio_sched *q,
                         struct sched_gate_list *sched, ktime_t base)
{
        struct sched_entry *entry;
        u32 interval = 0;

        list_for_each_entry(entry, &sched->entries, list) {
                entry->next_txtime = ktime_add_ns(base, interval);
                interval += entry->interval;
        }
}
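
/* Example: for entries of 300us, 300us and 400us, setup_txtime() seeds
 * next_txtime at base + 0, base + 300us and base + 600us respectively,
 * one offset per entry into the first cycle.
 */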
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
        struct __tc_taprio_qopt_offload *__offload;

        __offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
                            GFP_KERNEL);
        if (!__offload)
                return NULL;

        refcount_set(&__offload->users, 1);

        return &__offload->offload;
}
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
                                                  *offload)
{
        struct __tc_taprio_qopt_offload *__offload;

        __offload = container_of(offload, struct __tc_taprio_qopt_offload,
                                 offload);
        refcount_inc(&__offload->users);

        return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
        struct __tc_taprio_qopt_offload *__offload;

        __offload = container_of(offload, struct __tc_taprio_qopt_offload,
                                 offload);

        if (!refcount_dec_and_test(&__offload->users))
                return;

        kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);
/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
        struct sched_gate_list *oper, *admin;

        spin_lock(&q->current_entry_lock);

        oper = rcu_dereference_protected(q->oper_sched,
                                         lockdep_is_held(&q->current_entry_lock));
        admin = rcu_dereference_protected(q->admin_sched,
                                          lockdep_is_held(&q->current_entry_lock));

        switch_schedules(q, &admin, &oper);

        spin_unlock(&q->current_entry_lock);
}
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
        u32 i, queue_mask = 0;

        for (i = 0; i < dev->num_tc; i++) {
                u32 offset, count;

                if (!(tc_mask & BIT(i)))
                        continue;

                offset = dev->tc_to_txq[i].offset;
                count = dev->tc_to_txq[i].count;

                queue_mask |= GENMASK(offset + count - 1, offset);
        }

        return queue_mask;
}
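
/* Example: with tc1 mapped to 2 queues starting at offset 2 (i.e.
 * "queues ... 2@2"), a tc_mask of BIT(1) yields GENMASK(3, 2) = 0xc,
 * selecting TX queues 2 and 3.
 */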
static void taprio_sched_to_offload(struct net_device *dev,
                                    struct sched_gate_list *sched,
                                    struct tc_taprio_qopt_offload *offload)
{
        struct sched_entry *entry;
        int i = 0;

        offload->base_time = sched->base_time;
        offload->cycle_time = sched->cycle_time;
        offload->cycle_time_extension = sched->cycle_time_extension;

        list_for_each_entry(entry, &sched->entries, list) {
                struct tc_taprio_sched_entry *e = &offload->entries[i];

                e->command = entry->command;
                e->interval = entry->interval;
                e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);

                i++;
        }

        offload->num_entries = i;
}
static int taprio_enable_offload(struct net_device *dev,
                                 struct taprio_sched *q,
                                 struct sched_gate_list *sched,
                                 struct netlink_ext_ack *extack)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        struct tc_taprio_qopt_offload *offload;
        int err = 0;

        if (!ops->ndo_setup_tc) {
                NL_SET_ERR_MSG(extack,
                               "Device does not support taprio offload");
                return -EOPNOTSUPP;
        }

        offload = taprio_offload_alloc(sched->num_entries);
        if (!offload) {
                NL_SET_ERR_MSG(extack,
                               "Not enough memory for enabling offload mode");
                return -ENOMEM;
        }
        offload->enable = 1;
        taprio_sched_to_offload(dev, sched, offload);

        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
        if (err < 0) {
                NL_SET_ERR_MSG(extack,
                               "Device failed to setup taprio offload");
                goto done;
        }

        q->offloaded = true;

done:
        taprio_offload_free(offload);

        return err;
}
static int taprio_disable_offload(struct net_device *dev,
                                  struct taprio_sched *q,
                                  struct netlink_ext_ack *extack)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        struct tc_taprio_qopt_offload *offload;
        int err;

        if (!q->offloaded)
                return 0;

        offload = taprio_offload_alloc(0);
        if (!offload) {
                NL_SET_ERR_MSG(extack,
                               "Not enough memory to disable offload mode");
                return -ENOMEM;
        }
        offload->enable = 0;

        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
        if (err < 0) {
                NL_SET_ERR_MSG(extack,
                               "Device failed to disable offload");
                goto out;
        }

        q->offloaded = false;

out:
        taprio_offload_free(offload);

        return err;
}
/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
                                struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int err = -EINVAL;

        if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                const struct ethtool_ops *ops = dev->ethtool_ops;
                struct ethtool_ts_info info = {
                        .cmd = ETHTOOL_GET_TS_INFO,
                        .phc_index = -1,
                };

                if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
                        NL_SET_ERR_MSG(extack,
                                       "The 'clockid' cannot be specified for full offload");
                        goto out;
                }

                if (ops && ops->get_ts_info)
                        err = ops->get_ts_info(dev, &info);

                if (err || info.phc_index < 0) {
                        NL_SET_ERR_MSG(extack,
                                       "Device does not have a PTP clock");
                        err = -ENOTSUPP;
                        goto out;
                }
        } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
                int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
                enum tk_offsets tk_offset;

                /* We only support static clockids and we don't allow
                 * for it to be modified after the first init.
                 */
                if (clockid < 0 ||
                    (q->clockid != -1 && q->clockid != clockid)) {
                        NL_SET_ERR_MSG(extack,
                                       "Changing the 'clockid' of a running schedule is not supported");
                        err = -ENOTSUPP;
                        goto out;
                }

                switch (clockid) {
                case CLOCK_REALTIME:
                        tk_offset = TK_OFFS_REAL;
                        break;
                case CLOCK_MONOTONIC:
                        tk_offset = TK_OFFS_MAX;
                        break;
                case CLOCK_BOOTTIME:
                        tk_offset = TK_OFFS_BOOT;
                        break;
                case CLOCK_TAI:
                        tk_offset = TK_OFFS_TAI;
                        break;
                default:
                        NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
                        err = -EINVAL;
                        goto out;
                }
                /* This pairs with READ_ONCE() in taprio_mono_to_any */
                WRITE_ONCE(q->tk_offset, tk_offset);

                q->clockid = clockid;
        } else {
                NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
                goto out;
        }

        /* Everything went ok, return success. */
        err = 0;

out:
        return err;
}
static int taprio_mqprio_cmp(const struct net_device *dev,
                             const struct tc_mqprio_qopt *mqprio)
{
        int i;

        if (!mqprio || mqprio->num_tc != dev->num_tc)
                return -1;

        for (i = 0; i < mqprio->num_tc; i++)
                if (dev->tc_to_txq[i].count != mqprio->count[i] ||
                    dev->tc_to_txq[i].offset != mqprio->offset[i])
                        return -1;

        for (i = 0; i <= TC_BITMASK; i++)
                if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
                        return -1;

        return 0;
}
/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
static int taprio_new_flags(const struct nlattr *attr, u32 old,
                            struct netlink_ext_ack *extack)
{
        u32 new = 0;

        if (attr)
                new = nla_get_u32(attr);

        if (old != TAPRIO_FLAGS_INVALID && old != new) {
                NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
                return -EOPNOTSUPP;
        }

        if (!taprio_flags_valid(new)) {
                NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
                return -EINVAL;
        }

        return new;
}
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
        struct sched_gate_list *oper, *admin, *new_admin;
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct tc_mqprio_qopt *mqprio = NULL;
        unsigned long flags;
        ktime_t start;
        int i, err;

        err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
                                          taprio_policy, extack);
        if (err < 0)
                return err;

        if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
                mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

        err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
                               q->flags, extack);
        if (err < 0)
                return err;

        q->flags = err;

        err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
        if (err < 0)
                return err;

        new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
        if (!new_admin) {
                NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&new_admin->entries);

        rcu_read_lock();
        oper = rcu_dereference(q->oper_sched);
        admin = rcu_dereference(q->admin_sched);
        rcu_read_unlock();

        /* no changes - no new mqprio settings */
        if (!taprio_mqprio_cmp(dev, mqprio))
                mqprio = NULL;

        if (mqprio && (oper || admin)) {
                NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
                err = -ENOTSUPP;
                goto free_sched;
        }

        err = parse_taprio_schedule(q, tb, new_admin, extack);
        if (err < 0)
                goto free_sched;

        if (new_admin->num_entries == 0) {
                NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
                err = -EINVAL;
                goto free_sched;
        }

        err = taprio_parse_clockid(sch, tb, extack);
        if (err < 0)
                goto free_sched;

        taprio_set_picos_per_byte(dev, q);

        if (mqprio) {
                err = netdev_set_num_tc(dev, mqprio->num_tc);
                if (err)
                        goto free_sched;
                for (i = 0; i < mqprio->num_tc; i++)
                        netdev_set_tc_queue(dev, i,
                                            mqprio->count[i],
                                            mqprio->offset[i]);

                /* Always use supplied priority mappings */
                for (i = 0; i <= TC_BITMASK; i++)
                        netdev_set_prio_tc_map(dev, i,
                                               mqprio->prio_tc_map[i]);
        }

        if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                err = taprio_enable_offload(dev, q, new_admin, extack);
        else
                err = taprio_disable_offload(dev, q, extack);
        if (err)
                goto free_sched;

        /* Protects against enqueue()/dequeue() */
        spin_lock_bh(qdisc_lock(sch));

        if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
                if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                        NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
                        err = -EINVAL;
                        goto unlock;
                }

                q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
        }

        if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
            !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
            !hrtimer_active(&q->advance_timer)) {
                hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
                q->advance_timer.function = advance_sched;
        }

        if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                q->dequeue = taprio_dequeue_offload;
                q->peek = taprio_peek_offload;
        } else {
                /* Be sure to always keep the function pointers
                 * in a consistent state.
                 */
                q->dequeue = taprio_dequeue_soft;
                q->peek = taprio_peek_soft;
        }

        err = taprio_get_start_time(sch, new_admin, &start);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
                goto unlock;
        }

        setup_txtime(q, new_admin, start);

        if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                if (!oper) {
                        rcu_assign_pointer(q->oper_sched, new_admin);
                        err = 0;
                        new_admin = NULL;
                        goto unlock;
                }

                rcu_assign_pointer(q->admin_sched, new_admin);
                if (admin)
                        call_rcu(&admin->rcu, taprio_free_sched_cb);
        } else {
                setup_first_close_time(q, new_admin, start);

                /* Protects against advance_sched() */
                spin_lock_irqsave(&q->current_entry_lock, flags);

                taprio_start_sched(sch, start, new_admin);

                rcu_assign_pointer(q->admin_sched, new_admin);
                if (admin)
                        call_rcu(&admin->rcu, taprio_free_sched_cb);

                spin_unlock_irqrestore(&q->current_entry_lock, flags);

                if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                        taprio_offload_config_changed(q);
        }

        new_admin = NULL;
        err = 0;

unlock:
        spin_unlock_bh(qdisc_lock(sch));

free_sched:
        if (new_admin)
                call_rcu(&new_admin->rcu, taprio_free_sched_cb);

        return err;
}
static void taprio_reset(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int i;

        hrtimer_cancel(&q->advance_timer);
        if (q->qdiscs) {
                for (i = 0; i < dev->num_tx_queues; i++)
                        if (q->qdiscs[i])
                                qdisc_reset(q->qdiscs[i]);
        }
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
}
static void taprio_destroy(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int i;

        spin_lock(&taprio_list_lock);
        list_del(&q->taprio_list);
        spin_unlock(&taprio_list_lock);

        /* Note that taprio_reset() might not be called if an error
         * happens in qdisc_create(), after taprio_init() has been called.
         */
        hrtimer_cancel(&q->advance_timer);

        taprio_disable_offload(dev, q, NULL);

        if (q->qdiscs) {
                for (i = 0; i < dev->num_tx_queues; i++)
                        qdisc_put(q->qdiscs[i]);

                kfree(q->qdiscs);
        }
        q->qdiscs = NULL;

        netdev_reset_tc(dev);

        if (q->oper_sched)
                call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

        if (q->admin_sched)
                call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int i;

        spin_lock_init(&q->current_entry_lock);

        hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
        q->advance_timer.function = advance_sched;

        q->dequeue = taprio_dequeue_soft;
        q->peek = taprio_peek_soft;

        q->root = sch;

        /* We only support static clockids. Use an invalid value as default
         * and get the valid one on taprio_change().
         */
        q->clockid = -1;
        q->flags = TAPRIO_FLAGS_INVALID;

        spin_lock(&taprio_list_lock);
        list_add(&q->taprio_list, &taprio_list);
        spin_unlock(&taprio_list_lock);

        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        /* pre-allocate qdisc, attachment can't fail */
        q->qdiscs = kcalloc(dev->num_tx_queues,
                            sizeof(q->qdiscs[0]),
                            GFP_KERNEL);

        if (!q->qdiscs)
                return -ENOMEM;

        if (!opt)
                return -EINVAL;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                struct Qdisc *qdisc;

                dev_queue = netdev_get_tx_queue(dev, i);
                qdisc = qdisc_create_dflt(dev_queue,
                                          &pfifo_qdisc_ops,
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)),
                                          extack);
                if (!qdisc)
                        return -ENOMEM;

                if (i < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);

                q->qdiscs[i] = qdisc;
        }

        return taprio_change(sch, opt, extack);
}
static void taprio_attach(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx;

        /* Attach underlying qdisc */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                struct Qdisc *qdisc = q->qdiscs[ntx];
                struct Qdisc *old;

                if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                        qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
                        old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                } else {
                        old = dev_graft_qdisc(qdisc->dev_queue, sch);
                        qdisc_refcount_inc(sch);
                }
                if (old)
                        qdisc_put(old);
        }

        /* access to the child qdiscs is not needed in offload mode */
        if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                kfree(q->qdiscs);
                q->qdiscs = NULL;
        }
}
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
                                             unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;

        return netdev_get_tx_queue(dev, ntx);
}
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
                        struct Qdisc *new, struct Qdisc **old,
                        struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        if (!dev_queue)
                return -EINVAL;

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                *old = dev_graft_qdisc(dev_queue, new);
        } else {
                *old = q->qdiscs[cl - 1];
                q->qdiscs[cl - 1] = new;
        }

        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

        if (dev->flags & IFF_UP)
                dev_activate(dev);

        return 0;
}
static int dump_entry(struct sk_buff *msg,
                      const struct sched_entry *entry)
{
        struct nlattr *item;

        item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
        if (!item)
                return -ENOSPC;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
                goto nla_put_failure;

        if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
                goto nla_put_failure;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
                        entry->gate_mask))
                goto nla_put_failure;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
                        entry->interval))
                goto nla_put_failure;

        return nla_nest_end(msg, item);

nla_put_failure:
        nla_nest_cancel(msg, item);
        return -1;
}
static int dump_schedule(struct sk_buff *msg,
                         const struct sched_gate_list *root)
{
        struct nlattr *entry_list;
        struct sched_entry *entry;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
                        root->base_time, TCA_TAPRIO_PAD))
                return -1;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
                        root->cycle_time, TCA_TAPRIO_PAD))
                return -1;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
                        root->cycle_time_extension, TCA_TAPRIO_PAD))
                return -1;

        entry_list = nla_nest_start_noflag(msg,
                                           TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
        if (!entry_list)
                goto error_nest;

        list_for_each_entry(entry, &root->entries, list) {
                if (dump_entry(msg, entry) < 0)
                        goto error_nest;
        }

        nla_nest_end(msg, entry_list);
        return 0;

error_nest:
        nla_nest_cancel(msg, entry_list);
        return -1;
}
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sched_gate_list *oper, *admin;
        struct tc_mqprio_qopt opt = { 0 };
        struct nlattr *nest, *sched_nest;
        unsigned int i;

        rcu_read_lock();
        oper = rcu_dereference(q->oper_sched);
        admin = rcu_dereference(q->admin_sched);

        opt.num_tc = netdev_get_num_tc(dev);
        memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

        for (i = 0; i < netdev_get_num_tc(dev); i++) {
                opt.count[i] = dev->tc_to_txq[i].count;
                opt.offset[i] = dev->tc_to_txq[i].offset;
        }

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                goto start_error;

        if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
                goto options_error;

        if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
            nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
                goto options_error;

        if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
                goto options_error;

        if (q->txtime_delay &&
            nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
                goto options_error;

        if (oper && dump_schedule(skb, oper))
                goto options_error;

        if (!admin)
                goto done;

        sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
        if (!sched_nest)
                goto options_error;

        if (dump_schedule(skb, admin))
                goto admin_error;

        nla_nest_end(skb, sched_nest);

done:
        rcu_read_unlock();

        return nla_nest_end(skb, nest);

admin_error:
        nla_nest_cancel(skb, sched_nest);

options_error:
        nla_nest_cancel(skb, nest);

start_error:
        rcu_read_unlock();
        return -ENOSPC;
}
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        if (!dev_queue)
                return NULL;

        return dev_queue->qdisc_sleeping;
}
static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
        unsigned int ntx = TC_H_MIN(classid);

        if (!taprio_queue_get(sch, ntx))
                return 0;

        return ntx;
}
static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle |= TC_H_MIN(cl);
        tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

        return 0;
}
static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                   struct gnet_dump *d)
        __releases(d->lock)
        __acquires(d->lock)
{
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
            qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
}
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx;

        if (arg->stop)
                return;

        arg->count = arg->skip;
        for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
                if (arg->fn(sch, ntx + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}
static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
                                                struct tcmsg *tcm)
{
        return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops taprio_class_ops = {
        .graft          = taprio_graft,
        .leaf           = taprio_leaf,
        .find           = taprio_find,
        .walk           = taprio_walk,
        .dump           = taprio_dump_class,
        .dump_stats     = taprio_dump_class_stats,
        .select_queue   = taprio_select_queue,
};
static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
        .cl_ops         = &taprio_class_ops,
        .id             = "taprio",
        .priv_size      = sizeof(struct taprio_sched),
        .init           = taprio_init,
        .change         = taprio_change,
        .destroy        = taprio_destroy,
        .reset          = taprio_reset,
        .attach         = taprio_attach,
        .peek           = taprio_peek,
        .dequeue        = taprio_dequeue,
        .enqueue        = taprio_enqueue,
        .dump           = taprio_dump,
        .owner          = THIS_MODULE,
};
static struct notifier_block taprio_device_notifier = {
        .notifier_call = taprio_dev_notifier,
};
static int __init taprio_module_init(void)
{
        int err = register_netdevice_notifier(&taprio_device_notifier);

        if (err)
                return err;

        return register_qdisc(&taprio_qdisc_ops);
}
static void __exit taprio_module_exit(void)
{
        unregister_qdisc(&taprio_qdisc_ops);
        unregister_netdevice_notifier(&taprio_device_notifier);
}
module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");