]> git.itanic.dy.fi Git - linux-stable/blob - drivers/net/ethernet/freescale/enetc/enetc_qos.c
net: enetc: move enetc_set_psfp() out of the common enetc_set_features()
[linux-stable] / drivers / net / ethernet / freescale / enetc / enetc_qos.c
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2019 NXP */
3
4 #include "enetc.h"
5
6 #include <net/pkt_sched.h>
7 #include <linux/math64.h>
8 #include <linux/refcount.h>
9 #include <net/pkt_cls.h>
10 #include <net/tc_act/tc_gate.h>
11
12 static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
13 {
14         return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
15                 & ENETC_QBV_MAX_GCL_LEN_MASK;
16 }
17
18 void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
19 {
20         u32 old_speed = priv->speed;
21         u32 pspeed;
22
23         if (speed == old_speed)
24                 return;
25
26         switch (speed) {
27         case SPEED_1000:
28                 pspeed = ENETC_PMR_PSPEED_1000M;
29                 break;
30         case SPEED_2500:
31                 pspeed = ENETC_PMR_PSPEED_2500M;
32                 break;
33         case SPEED_100:
34                 pspeed = ENETC_PMR_PSPEED_100M;
35                 break;
36         case SPEED_10:
37         default:
38                 pspeed = ENETC_PMR_PSPEED_10M;
39         }
40
41         priv->speed = speed;
42         enetc_port_wr(&priv->si->hw, ENETC_PMR,
43                       (enetc_port_rd(&priv->si->hw, ENETC_PMR)
44                       & (~ENETC_PMR_PSPEED_MASK))
45                       | pspeed);
46 }
47
48 static int enetc_setup_taprio(struct net_device *ndev,
49                               struct tc_taprio_qopt_offload *admin_conf)
50 {
51         struct enetc_ndev_priv *priv = netdev_priv(ndev);
52         struct enetc_cbd cbd = {.cmd = 0};
53         struct tgs_gcl_conf *gcl_config;
54         struct tgs_gcl_data *gcl_data;
55         dma_addr_t dma;
56         struct gce *gce;
57         u16 data_size;
58         u16 gcl_len;
59         void *tmp;
60         u32 tge;
61         int err;
62         int i;
63
64         if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
65                 return -EINVAL;
66         gcl_len = admin_conf->num_entries;
67
68         tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
69         if (!admin_conf->enable) {
70                 enetc_wr(&priv->si->hw,
71                          ENETC_QBV_PTGCR_OFFSET,
72                          tge & (~ENETC_QBV_TGE));
73
74                 priv->active_offloads &= ~ENETC_F_QBV;
75
76                 return 0;
77         }
78
79         if (admin_conf->cycle_time > U32_MAX ||
80             admin_conf->cycle_time_extension > U32_MAX)
81                 return -EINVAL;
82
83         /* Configure the (administrative) gate control list using the
84          * control BD descriptor.
85          */
86         gcl_config = &cbd.gcl_conf;
87
88         data_size = struct_size(gcl_data, entry, gcl_len);
89         tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
90                                        &dma, (void *)&gcl_data);
91         if (!tmp)
92                 return -ENOMEM;
93
94         gce = (struct gce *)(gcl_data + 1);
95
96         /* Set all gates open as default */
97         gcl_config->atc = 0xff;
98         gcl_config->acl_len = cpu_to_le16(gcl_len);
99
100         gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
101         gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
102         gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
103         gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
104
105         for (i = 0; i < gcl_len; i++) {
106                 struct tc_taprio_sched_entry *temp_entry;
107                 struct gce *temp_gce = gce + i;
108
109                 temp_entry = &admin_conf->entries[i];
110
111                 temp_gce->gate = (u8)temp_entry->gate_mask;
112                 temp_gce->period = cpu_to_le32(temp_entry->interval);
113         }
114
115         cbd.status_flags = 0;
116
117         cbd.cls = BDCR_CMD_PORT_GCL;
118         cbd.status_flags = 0;
119
120         enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
121                  tge | ENETC_QBV_TGE);
122
123         err = enetc_send_cmd(priv->si, &cbd);
124         if (err)
125                 enetc_wr(&priv->si->hw,
126                          ENETC_QBV_PTGCR_OFFSET,
127                          tge & (~ENETC_QBV_TGE));
128
129         enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
130
131         if (!err)
132                 priv->active_offloads |= ENETC_F_QBV;
133
134         return err;
135 }
136
137 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
138 {
139         struct tc_taprio_qopt_offload *taprio = type_data;
140         struct enetc_ndev_priv *priv = netdev_priv(ndev);
141         int err;
142         int i;
143
144         /* TSD and Qbv are mutually exclusive in hardware */
145         for (i = 0; i < priv->num_tx_rings; i++)
146                 if (priv->tx_ring[i]->tsd_enable)
147                         return -EBUSY;
148
149         for (i = 0; i < priv->num_tx_rings; i++)
150                 enetc_set_bdr_prio(&priv->si->hw,
151                                    priv->tx_ring[i]->index,
152                                    taprio->enable ? i : 0);
153
154         err = enetc_setup_taprio(ndev, taprio);
155
156         if (err)
157                 for (i = 0; i < priv->num_tx_rings; i++)
158                         enetc_set_bdr_prio(&priv->si->hw,
159                                            priv->tx_ring[i]->index,
160                                            taprio->enable ? 0 : i);
161
162         return err;
163 }
164
165 static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
166 {
167         return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
168 }
169
170 static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
171 {
172         return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
173 }
174
175 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
176 {
177         struct enetc_ndev_priv *priv = netdev_priv(ndev);
178         struct tc_cbs_qopt_offload *cbs = type_data;
179         u32 port_transmit_rate = priv->speed;
180         u8 tc_nums = netdev_get_num_tc(ndev);
181         struct enetc_si *si = priv->si;
182         u32 hi_credit_bit, hi_credit_reg;
183         u32 max_interference_size;
184         u32 port_frame_max_size;
185         u8 tc = cbs->queue;
186         u8 prio_top, prio_next;
187         int bw_sum = 0;
188         u8 bw;
189
190         prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
191         prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
192
193         /* Support highest prio and second prio tc in cbs mode */
194         if (tc != prio_top && tc != prio_next)
195                 return -EOPNOTSUPP;
196
197         if (!cbs->enable) {
198                 /* Make sure the other TC that are numerically
199                  * lower than this TC have been disabled.
200                  */
201                 if (tc == prio_top &&
202                     enetc_get_cbs_enable(&si->hw, prio_next)) {
203                         dev_err(&ndev->dev,
204                                 "Disable TC%d before disable TC%d\n",
205                                 prio_next, tc);
206                         return -EINVAL;
207                 }
208
209                 enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
210                 enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
211
212                 return 0;
213         }
214
215         if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
216             cbs->idleslope < 0 || cbs->sendslope > 0)
217                 return -EOPNOTSUPP;
218
219         port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
220
221         bw = cbs->idleslope / (port_transmit_rate * 10UL);
222
223         /* Make sure the other TC that are numerically
224          * higher than this TC have been enabled.
225          */
226         if (tc == prio_next) {
227                 if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
228                         dev_err(&ndev->dev,
229                                 "Enable TC%d first before enable TC%d\n",
230                                 prio_top, prio_next);
231                         return -EINVAL;
232                 }
233                 bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
234         }
235
236         if (bw_sum + bw >= 100) {
237                 dev_err(&ndev->dev,
238                         "The sum of all CBS Bandwidth can't exceed 100\n");
239                 return -EINVAL;
240         }
241
242         enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
243
244         /* For top prio TC, the max_interfrence_size is maxSizedFrame.
245          *
246          * For next prio TC, the max_interfrence_size is calculated as below:
247          *
248          *      max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
249          *
250          *      - RA: idleSlope for AVB Class A
251          *      - R0: port transmit rate
252          *      - M0: maximum sized frame for the port
253          *      - MA: maximum sized frame for AVB Class A
254          */
255
256         if (tc == prio_top) {
257                 max_interference_size = port_frame_max_size * 8;
258         } else {
259                 u32 m0, ma, r0, ra;
260
261                 m0 = port_frame_max_size * 8;
262                 ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
263                 ra = enetc_get_cbs_bw(&si->hw, prio_top) *
264                         port_transmit_rate * 10000ULL;
265                 r0 = port_transmit_rate * 1000000ULL;
266                 max_interference_size = m0 + ma +
267                         (u32)div_u64((u64)ra * m0, r0 - ra);
268         }
269
270         /* hiCredit bits calculate by:
271          *
272          * maxSizedFrame * (idleSlope/portTxRate)
273          */
274         hi_credit_bit = max_interference_size * bw / 100;
275
276         /* hiCredit bits to hiCredit register need to calculated as:
277          *
278          * (enetClockFrequency / portTransmitRate) * 100
279          */
280         hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
281                                      port_transmit_rate * 1000000ULL);
282
283         enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
284
285         /* Set bw register and enable this traffic class */
286         enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
287
288         return 0;
289 }
290
291 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
292 {
293         struct enetc_ndev_priv *priv = netdev_priv(ndev);
294         struct tc_etf_qopt_offload *qopt = type_data;
295         u8 tc_nums = netdev_get_num_tc(ndev);
296         int tc;
297
298         if (!tc_nums)
299                 return -EOPNOTSUPP;
300
301         tc = qopt->queue;
302
303         if (tc < 0 || tc >= priv->num_tx_rings)
304                 return -EINVAL;
305
306         /* TSD and Qbv are mutually exclusive in hardware */
307         if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
308                 return -EBUSY;
309
310         priv->tx_ring[tc]->tsd_enable = qopt->enable;
311         enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
312                       qopt->enable ? ENETC_TSDE : 0);
313
314         return 0;
315 }
316
/* Stream identification function types supported by the hardware
 * Stream Identity table.
 */
enum streamid_type {
	STREAMID_TYPE_RESERVED = 0,
	STREAMID_TYPE_NULL,	/* null stream id: match DMAC (+ VID) */
	STREAMID_TYPE_SMAC,	/* match source MAC (+ VID) */
};

/* VLAN tag matching mode for stream identification */
enum streamid_vlan_tagged {
	STREAMID_VLAN_RESERVED = 0,
	STREAMID_VLAN_TAGGED,	/* only tagged frames match */
	STREAMID_VLAN_UNTAGGED,	/* only untagged frames match */
	STREAMID_VLAN_ALL,	/* both tagged and untagged match */
};
329
/* Wildcard for priority/handle/meter fields that should not be matched */
#define ENETC_PSFP_WILDCARD -1
/* Base offset for synthesized handles (used by code outside this chunk) */
#define HANDLE_OFFSET 100

/* Which offload path(s) a given action combination may be steered to */
enum forward_type {
	FILTER_ACTION_TYPE_PSFP = BIT(0),
	FILTER_ACTION_TYPE_ACL = BIT(1),
	FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
};

/* This is for limit output type for input actions */
struct actions_fwd {
	u64 actions;	/* bitmap of accepted FLOW_ACTION_* ids */
	u64 keys;	/* include the must needed keys */
	enum forward_type output;	/* offload path for this combo */
};
345
/* 802.1Qci per-stream-filter counters, derived from the raw hardware
 * counter pairs in enetc_streamcounter_hw_get().
 */
struct psfp_streamfilter_counters {
	u64 matching_frames_count;	/* frames matching the filter */
	u64 passing_frames_count;	/* matched minus SDU + gate drops */
	u64 not_passing_frames_count;	/* dropped by the stream gate */
	u64 passing_sdu_count;		/* matched minus oversize drops */
	u64 not_passing_sdu_count;	/* dropped for exceeding max SDU */
	u64 red_frames_count;		/* dropped by the flow meter */
};
354
/* Software state for one Stream Identity table entry */
struct enetc_streamid {
	u32 index;		/* index into the hardware SID table */
	union {
		u8 src_mac[6];	/* used with STREAMID_TYPE_SMAC */
		u8 dst_mac[6];	/* used with STREAMID_TYPE_NULL */
	};
	u8 filtertype;		/* enum streamid_type */
	u16 vid;		/* VLAN id to match */
	u8 tagged;		/* enum streamid_vlan_tagged */
	s32 handle;		/* stream handle assigned on match */
};
366
/* Software state for one Stream Filter Instance table entry */
struct enetc_psfp_filter {
	u32 index;		/* index into the hardware SFI table */
	s32 handle;		/* stream handle to match, <0 = wildcard */
	s8 prio;		/* priority to match, <0 = wildcard */
	u32 maxsdu;		/* max SDU size, 0 = no SDU check */
	u32 gate_id;		/* stream gate instance this filter feeds */
	s32 meter_id;		/* flow meter index, <0 = none */
	refcount_t refcount;	/* shared by rules with identical params */
	struct hlist_node node;	/* link in epsfp.psfp_filter_list */
};
377
/* Software state for one Stream Gate Instance plus its gate control
 * list entries (flexible array).
 */
struct enetc_psfp_gate {
	u32 index;		/* index into the hardware SGI table */
	s8 init_ipv;		/* initial internal priority, <0 = unused */
	u64 basetime;		/* gate list base time */
	u64 cycletime;		/* gate list cycle length */
	u64 cycletimext;	/* cycle time extension */
	u32 num_entries;	/* number of entries[] below */
	refcount_t refcount;	/* shared by rules using this gate */
	struct hlist_node node;	/* link in epsfp.psfp_gate_list */
	struct action_gate_entry entries[];
};
389
/* Only enable the green color frame now
 * Will add eir and ebs color blind, couple flag etc when
 * policing action add more offloading parameters
 */
struct enetc_psfp_meter {
	u32 index;		/* index into the hardware flow meter table */
	u32 cir;		/* committed information rate (police rate) */
	u32 cbs;		/* committed burst size */
	refcount_t refcount;	/* shared by rules using this meter */
	struct hlist_node node;	/* link in epsfp.psfp_meter_list */
};
401
/* Flag: this rule includes a flow meter (FMI) entry */
#define ENETC_PSFP_FLAGS_FMI BIT(0)

/* One offloaded rule: ties a stream identity to its filter, gate and
 * (optional) meter table indexes plus software statistics.
 */
struct enetc_stream_filter {
	struct enetc_streamid sid;
	u32 sfi_index;		/* stream filter instance index */
	u32 sgi_index;		/* stream gate instance index */
	u32 flags;		/* ENETC_PSFP_FLAGS_* */
	u32 fmi_index;		/* flow meter index, valid if FLAGS_FMI set */
	struct flow_stats stats;	/* last reported counters */
	struct hlist_node node;	/* link in epsfp.stream_list */
};
413
/* Global PSFP bookkeeping shared by all users of this file */
struct enetc_psfp {
	unsigned long dev_bitmap;	/* presumably one bit per port with
					 * PSFP enabled - confirm in callers
					 */
	unsigned long *psfp_sfi_bitmap;	/* allocated SFI index bitmap */
	struct hlist_head stream_list;		/* enetc_stream_filter */
	struct hlist_head psfp_filter_list;	/* enetc_psfp_filter */
	struct hlist_head psfp_gate_list;	/* enetc_psfp_gate */
	struct hlist_head psfp_meter_list;	/* enetc_psfp_meter */
	spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
};
423
/* Accepted action/key combinations and the offload path handling them */
static struct actions_fwd enetc_act_fwd[] = {
	{
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	{
		BIT(FLOW_ACTION_POLICE) |
		BIT(FLOW_ACTION_GATE),
		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
		FILTER_ACTION_TYPE_PSFP
	},
	/* example for ACL actions */
	{
		BIT(FLOW_ACTION_DROP),
		0,
		FILTER_ACTION_TYPE_ACL
	}
};

/* Single global PSFP state instance */
static struct enetc_psfp epsfp = {
	.dev_bitmap = 0,
	.psfp_sfi_bitmap = NULL,
};

static LIST_HEAD(enetc_block_cb_list);
450
/* Stream Identity Entry Set Descriptor */
/* Program one entry of the hardware Stream Identity table.
 *
 * The entry is written twice: first in disabled state with broadcast/
 * wildcard match data (clearing any stale state), then - when @enable
 * is set - rewritten with the real NULL-stream (DMAC) or SMAC match
 * fields and the enable bit set.
 *
 * Returns 0 on success or a negative error code.
 */
static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
				 struct enetc_streamid *sid,
				 u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct streamid_data *si_data;
	struct streamid_conf *si_conf;
	dma_addr_t dma;
	u16 data_size;
	void *tmp;
	int port;
	int err;

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;

	if (sid->index >= priv->psfp_cap.max_streamid)
		return -EINVAL;

	if (sid->filtertype != STREAMID_TYPE_NULL &&
	    sid->filtertype != STREAMID_TYPE_SMAC)
		return -EOPNOTSUPP;

	/* Disable operation before enable */
	cbd.index = cpu_to_le16((u16)sid->index);
	cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
	cbd.status_flags = 0;

	data_size = sizeof(struct streamid_data);
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&si_data);
	if (!tmp)
		return -ENOMEM;

	/* Wildcard match data for the disable pass.
	 * NOTE(review): '+' is used where '|' looks intended; the operand
	 * bit ranges do not overlap, so the value is the same - confirm
	 * before changing.
	 */
	eth_broadcast_addr(si_data->dmac);
	si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
			       + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));

	si_conf = &cbd.sid_set;
	/* Only one port supported for one entry, set itself */
	si_conf->iports = cpu_to_le32(1 << port);
	si_conf->id_type = 1;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		goto out;

	if (!enable)
		goto out;

	/* Enable the entry overwrite again incase space flushed by hardware */
	cbd.status_flags = 0;

	si_conf->en = 0x80;
	si_conf->stream_handle = cpu_to_le32(sid->handle);
	si_conf->iports = cpu_to_le32(1 << port);
	si_conf->id_type = sid->filtertype;
	si_conf->oui[2] = 0x0;
	si_conf->oui[1] = 0x80;
	si_conf->oui[0] = 0xC2;

	memset(si_data, 0, data_size);

	/* VIDM default to be 1.
	 * VID Match. If set (b1) then the VID must match, otherwise
	 * any VID is considered a match. VIDM setting is only used
	 * when TG is set to b01.
	 */
	if (si_conf->id_type == STREAMID_TYPE_NULL) {
		ether_addr_copy(si_data->dmac, sid->dst_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
				       | ENETC_CBDR_SID_VIDM);
	} else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
		ether_addr_copy(si_data->smac, sid->src_mac);
		si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
				       ((((u16)(sid->tagged) & 0x3) << 14)
				       | ENETC_CBDR_SID_VIDM);
	}

	err = enetc_send_cmd(priv->si, &cbd);
out:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);

	return err;
}
542
/* Stream Filter Instance Set Descriptor */
/* Program one entry of the hardware Stream Filter Instance table.
 * When @enable is clear an all-zero configuration is sent, which
 * disables the entry. Returns the control BD command status.
 */
static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
				     struct enetc_psfp_filter *sfi,
				     u8 enable)
{
	struct enetc_cbd cbd = {.cmd = 0};
	struct sfi_conf *sfi_config;
	int port;

	port = enetc_pf_to_port(priv->si->pdev);
	if (port < 0)
		return -EINVAL;

	cbd.index = cpu_to_le16(sfi->index);
	cbd.cls = BDCR_CMD_STREAM_FILTER;
	cbd.status_flags = 0x80;
	cbd.length = cpu_to_le16(1);

	sfi_config = &cbd.sfi_conf;
	if (!enable)
		goto exit;

	sfi_config->en = 0x80;

	/* Stream handle match is optional; handle < 0 means wildcard */
	if (sfi->handle >= 0) {
		sfi_config->stream_handle =
			cpu_to_le32(sfi->handle);
		sfi_config->sthm |= 0x80;
	}

	sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
	sfi_config->input_ports = cpu_to_le32(1 << port);

	/* The priority value which may be matched against the
	 * frame's priority value to determine a match for this entry.
	 */
	if (sfi->prio >= 0)
		sfi_config->multi |= (sfi->prio & 0x7) | 0x8;

	/* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
	 * field as being either an MSDU value or an index into the Flow
	 * Meter Instance table.
	 */
	if (sfi->maxsdu) {
		sfi_config->msdu =
		cpu_to_le16(sfi->maxsdu);
		sfi_config->multi |= 0x40;
	}

	if (sfi->meter_id >= 0) {
		sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
		sfi_config->multi |= 0x80;
	}

exit:
	return enetc_send_cmd(priv->si, &cbd);
}
600
601 static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
602                                       u32 index,
603                                       struct psfp_streamfilter_counters *cnt)
604 {
605         struct enetc_cbd cbd = { .cmd = 2 };
606         struct sfi_counter_data *data_buf;
607         dma_addr_t dma;
608         u16 data_size;
609         void *tmp;
610         int err;
611
612         cbd.index = cpu_to_le16((u16)index);
613         cbd.cmd = 2;
614         cbd.cls = BDCR_CMD_STREAM_FILTER;
615         cbd.status_flags = 0;
616
617         data_size = sizeof(struct sfi_counter_data);
618
619         tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
620                                        &dma, (void *)&data_buf);
621         if (!tmp)
622                 return -ENOMEM;
623
624         err = enetc_send_cmd(priv->si, &cbd);
625         if (err)
626                 goto exit;
627
628         cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
629                                      data_buf->matchl;
630
631         cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
632                                      data_buf->msdu_dropl;
633
634         cnt->passing_sdu_count = cnt->matching_frames_count
635                                 - cnt->not_passing_sdu_count;
636
637         cnt->not_passing_frames_count =
638                                 ((u64)data_buf->stream_gate_droph << 32) +
639                                 data_buf->stream_gate_dropl;
640
641         cnt->passing_frames_count = cnt->matching_frames_count -
642                                     cnt->not_passing_sdu_count -
643                                     cnt->not_passing_frames_count;
644
645         cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) +
646                                 data_buf->flow_meter_dropl;
647
648 exit:
649         enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
650
651         return err;
652 }
653
654 static u64 get_ptp_now(struct enetc_hw *hw)
655 {
656         u64 now_lo, now_hi, now;
657
658         now_lo = enetc_rd(hw, ENETC_SICTR0);
659         now_hi = enetc_rd(hw, ENETC_SICTR1);
660         now = now_lo | now_hi << 32;
661
662         return now;
663 }
664
665 static int get_start_ns(u64 now, u64 cycle, u64 *start)
666 {
667         u64 n;
668
669         if (!cycle)
670                 return -EFAULT;
671
672         n = div64_u64(now, cycle);
673
674         *start = (n + 1) * cycle;
675
676         return 0;
677 }
678
/* Stream Gate Instance Set Descriptor */
/* Program one Stream Gate Instance and its gate control list.
 *
 * Two control BD commands are issued: cmd 0 writes the basic SGI
 * configuration (or disables the entry when @enable is clear), cmd 1
 * transfers the gate control list itself. If the configured base time
 * is already in the past, the start is pushed to the next cycle
 * boundary relative to the current PTP time.
 *
 * Returns 0 on success or a negative error code.
 */
static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
				   struct enetc_psfp_gate *sgi,
				   u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct sgi_table *sgi_config;
	struct sgcl_conf *sgcl_config;
	struct sgcl_data *sgcl_data;
	struct sgce *sgce;
	dma_addr_t dma;
	u16 data_size;
	int err, i;
	void *tmp;
	u64 now;

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 0;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0x80;

	/* disable */
	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	/* Nothing to program without a gate list */
	if (!sgi->num_entries)
		return 0;

	if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
	    !sgi->cycletime)
		return -EINVAL;

	/* enable */
	sgi_config = &cbd.sgi_table;

	/* Keep open before gate list start */
	sgi_config->ocgtst = 0x80;

	/* Initial internal priority, applied only when init_ipv >= 0 */
	sgi_config->oipv = (sgi->init_ipv < 0) ?
				0x0 : ((sgi->init_ipv & 0x7) | 0x8);

	sgi_config->en = 0x80;

	/* Basic config */
	err = enetc_send_cmd(priv->si, &cbd);
	if (err)
		return -EINVAL;

	/* Second command: transfer the admin gate control list */
	memset(&cbd, 0, sizeof(cbd));

	cbd.index = cpu_to_le16(sgi->index);
	cbd.cmd = 1;
	cbd.cls = BDCR_CMD_STREAM_GCL;
	cbd.status_flags = 0;

	sgcl_config = &cbd.sgcl_conf;

	/* List length minus one, in a 2-bit hardware field */
	sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;

	data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
	tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
				       &dma, (void *)&sgcl_data);
	if (!tmp)
		return -ENOMEM;

	sgce = &sgcl_data->sgcl[0];

	sgcl_config->agtst = 0x80;

	sgcl_data->ct = sgi->cycletime;
	sgcl_data->cte = sgi->cycletimext;

	if (sgi->init_ipv >= 0)
		sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;

	/* Translate each software gate entry into the hardware layout */
	for (i = 0; i < sgi->num_entries; i++) {
		struct action_gate_entry *from = &sgi->entries[i];
		struct sgce *to = &sgce[i];

		if (from->gate_state)
			to->multi |= 0x10;

		if (from->ipv >= 0)
			to->multi |= ((from->ipv & 0x7) << 5) | 0x08;

		/* Optional per-entry octet budget, 24 bits byte-wise */
		if (from->maxoctets >= 0) {
			to->multi |= 0x01;
			to->msdu[0] = from->maxoctets & 0xFF;
			to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
			to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
		}

		to->interval = from->interval;
	}

	/* If basetime is less than now, calculate start time */
	now = get_ptp_now(&priv->si->hw);

	if (sgi->basetime < now) {
		u64 start;

		err = get_start_ns(now, sgi->cycletime, &start);
		if (err)
			goto exit;
		sgcl_data->btl = lower_32_bits(start);
		sgcl_data->bth = upper_32_bits(start);
	} else {
		u32 hi, lo;

		hi = upper_32_bits(sgi->basetime);
		lo = lower_32_bits(sgi->basetime);
		sgcl_data->bth = hi;
		sgcl_data->btl = lo;
	}

	err = enetc_send_cmd(priv->si, &cbd);

exit:
	enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
	return err;
}
800
/* Program one entry of the hardware Flow Meter table. Only the green
 * (committed) bucket is configured; EIR/EBS and all color/coupling
 * options stay disabled (see the struct enetc_psfp_meter comment).
 * Returns the control BD command status.
 */
static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
				  struct enetc_psfp_meter *fmi,
				  u8 enable)
{
	struct enetc_cbd cbd = { .cmd = 0 };
	struct fmi_conf *fmi_config;
	u64 temp = 0;

	cbd.index = cpu_to_le16((u16)fmi->index);
	cbd.cls = BDCR_CMD_FLOW_METER;
	cbd.status_flags = 0x80;

	/* A config without the enable bit disables the entry */
	if (!enable)
		return enetc_send_cmd(priv->si, &cbd);

	fmi_config = &cbd.fmi_conf;
	fmi_config->en = 0x80;

	/* Scale the configured rate into CIR register units using the
	 * 8000/3725 factor - presumably the hardware's 3.725 kbps
	 * granularity; TODO confirm against the reference manual.
	 */
	if (fmi->cir) {
		temp = (u64)8000 * fmi->cir;
		temp = div_u64(temp, 3725);
	}

	fmi_config->cir = cpu_to_le32((u32)temp);
	fmi_config->cbs = cpu_to_le32(fmi->cbs);

	/* Default for eir ebs disable */
	fmi_config->eir = 0;
	fmi_config->ebs = 0;

	/* Default:
	 * mark red disable
	 * drop on yellow disable
	 * color mode disable
	 * couple flag disable
	 */
	fmi_config->conf = 0;

	return enetc_send_cmd(priv->si, &cbd);
}
841
842 static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
843 {
844         struct enetc_stream_filter *f;
845
846         hlist_for_each_entry(f, &epsfp.stream_list, node)
847                 if (f->sid.index == index)
848                         return f;
849
850         return NULL;
851 }
852
853 static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
854 {
855         struct enetc_psfp_gate *g;
856
857         hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
858                 if (g->index == index)
859                         return g;
860
861         return NULL;
862 }
863
864 static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
865 {
866         struct enetc_psfp_filter *s;
867
868         hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
869                 if (s->index == index)
870                         return s;
871
872         return NULL;
873 }
874
875 static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
876 {
877         struct enetc_psfp_meter *m;
878
879         hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
880                 if (m->index == index)
881                         return m;
882
883         return NULL;
884 }
885
886 static struct enetc_psfp_filter
887         *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
888 {
889         struct enetc_psfp_filter *s;
890
891         hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
892                 if (s->gate_id == sfi->gate_id &&
893                     s->prio == sfi->prio &&
894                     s->maxsdu == sfi->maxsdu &&
895                     s->meter_id == sfi->meter_id)
896                         return s;
897
898         return NULL;
899 }
900
901 static int enetc_get_free_index(struct enetc_ndev_priv *priv)
902 {
903         u32 max_size = priv->psfp_cap.max_psfp_filter;
904         unsigned long index;
905
906         index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
907         if (index == max_size)
908                 return -1;
909
910         return index;
911 }
912
913 static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
914 {
915         struct enetc_psfp_filter *sfi;
916         u8 z;
917
918         sfi = enetc_get_filter_by_index(index);
919         WARN_ON(!sfi);
920         z = refcount_dec_and_test(&sfi->refcount);
921
922         if (z) {
923                 enetc_streamfilter_hw_set(priv, sfi, false);
924                 hlist_del(&sfi->node);
925                 kfree(sfi);
926                 clear_bit(index, epsfp.psfp_sfi_bitmap);
927         }
928 }
929
930 static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
931 {
932         struct enetc_psfp_gate *sgi;
933         u8 z;
934
935         sgi = enetc_get_gate_by_index(index);
936         WARN_ON(!sgi);
937         z = refcount_dec_and_test(&sgi->refcount);
938         if (z) {
939                 enetc_streamgate_hw_set(priv, sgi, false);
940                 hlist_del(&sgi->node);
941                 kfree(sgi);
942         }
943 }
944
945 static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
946 {
947         struct enetc_psfp_meter *fmi;
948         u8 z;
949
950         fmi = enetc_get_meter_by_index(index);
951         WARN_ON(!fmi);
952         z = refcount_dec_and_test(&fmi->refcount);
953         if (z) {
954                 enetc_flowmeter_hw_set(priv, fmi, false);
955                 hlist_del(&fmi->node);
956                 kfree(fmi);
957         }
958 }
959
/* Tear down one installed PSFP chain: drop one reference on the
 * optional flow meter, the stream gate and the stream filter (each is
 * disabled in hardware and freed when its last reference goes away),
 * then unlink and free the stream node itself.
 */
static void remove_one_chain(struct enetc_ndev_priv *priv,
			     struct enetc_stream_filter *filter)
{
	/* A flow meter is only attached when ENETC_PSFP_FLAGS_FMI was set
	 * at parse time (police action with a burst parameter).
	 */
	if (filter->flags & ENETC_PSFP_FLAGS_FMI)
		flow_meter_unref(priv, filter->fmi_index);

	stream_gate_unref(priv, filter->sgi_index);
	stream_filter_unref(priv, filter->sfi_index);

	hlist_del(&filter->node);
	kfree(filter);
}
972
/* Program a full PSFP chain into hardware in dependency order: stream
 * identification first, then stream filter, stream gate and finally the
 * optional flow meter. @sfi and @fmi may be NULL when no new entry of
 * that kind needs programming. On failure, every entry that was already
 * enabled is disabled again so hardware state is left unchanged.
 * Returns 0 on success or a negative error code.
 */
static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
			     struct enetc_streamid *sid,
			     struct enetc_psfp_filter *sfi,
			     struct enetc_psfp_gate *sgi,
			     struct enetc_psfp_meter *fmi)
{
	int err;

	err = enetc_streamid_hw_set(priv, sid, true);
	if (err)
		return err;

	/* NULL sfi means the caller reuses an already-programmed filter */
	if (sfi) {
		err = enetc_streamfilter_hw_set(priv, sfi, true);
		if (err)
			goto revert_sid;
	}

	err = enetc_streamgate_hw_set(priv, sgi, true);
	if (err)
		goto revert_sfi;

	/* Flow meter is optional */
	if (fmi) {
		err = enetc_flowmeter_hw_set(priv, fmi, true);
		if (err)
			goto revert_sgi;
	}

	return 0;

	/* Unwind in reverse order of programming */
revert_sgi:
	enetc_streamgate_hw_set(priv, sgi, false);
revert_sfi:
	if (sfi)
		enetc_streamfilter_hw_set(priv, sfi, false);
revert_sid:
	enetc_streamid_hw_set(priv, sid, false);
	return err;
}
1012
1013 static struct actions_fwd *enetc_check_flow_actions(u64 acts,
1014                                                     unsigned int inputkeys)
1015 {
1016         int i;
1017
1018         for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
1019                 if (acts == enetc_act_fwd[i].actions &&
1020                     inputkeys & enetc_act_fwd[i].keys)
1021                         return &enetc_act_fwd[i];
1022
1023         return NULL;
1024 }
1025
/* Validate that a tc police action is offloadable to the PSFP flow
 * meter. Supported: drop on exceed, pipe/ok on conform (ok only as the
 * last action), byte-based rate; peakrate, avrate, overhead and
 * packet-per-second rates are rejected with -EOPNOTSUPP and an extack
 * message. Returns 0 when the policer can be offloaded.
 */
static int enetc_psfp_policer_validate(const struct flow_action *action,
				       const struct flow_action_entry *act,
				       struct netlink_ext_ack *extack)
{
	/* Hardware can only drop traffic exceeding the rate */
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	/* "ok" terminates processing, so nothing may follow it */
	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	/* Only byte-based policing is supported, not packet-based */
	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}
1065
1066 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
1067                                       struct flow_cls_offload *f)
1068 {
1069         struct flow_action_entry *entryg = NULL, *entryp = NULL;
1070         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1071         struct netlink_ext_ack *extack = f->common.extack;
1072         struct enetc_stream_filter *filter, *old_filter;
1073         struct enetc_psfp_meter *fmi = NULL, *old_fmi;
1074         struct enetc_psfp_filter *sfi, *old_sfi;
1075         struct enetc_psfp_gate *sgi, *old_sgi;
1076         struct flow_action_entry *entry;
1077         struct action_gate_entry *e;
1078         u8 sfi_overwrite = 0;
1079         int entries_size;
1080         int i, err;
1081
1082         if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1083                 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
1084                 return -ENOSPC;
1085         }
1086
1087         flow_action_for_each(i, entry, &rule->action)
1088                 if (entry->id == FLOW_ACTION_GATE)
1089                         entryg = entry;
1090                 else if (entry->id == FLOW_ACTION_POLICE)
1091                         entryp = entry;
1092
1093         /* Not support without gate action */
1094         if (!entryg)
1095                 return -EINVAL;
1096
1097         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
1098         if (!filter)
1099                 return -ENOMEM;
1100
1101         filter->sid.index = f->common.chain_index;
1102
1103         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1104                 struct flow_match_eth_addrs match;
1105
1106                 flow_rule_match_eth_addrs(rule, &match);
1107
1108                 if (!is_zero_ether_addr(match.mask->dst) &&
1109                     !is_zero_ether_addr(match.mask->src)) {
1110                         NL_SET_ERR_MSG_MOD(extack,
1111                                            "Cannot match on both source and destination MAC");
1112                         err = -EINVAL;
1113                         goto free_filter;
1114                 }
1115
1116                 if (!is_zero_ether_addr(match.mask->dst)) {
1117                         if (!is_broadcast_ether_addr(match.mask->dst)) {
1118                                 NL_SET_ERR_MSG_MOD(extack,
1119                                                    "Masked matching on destination MAC not supported");
1120                                 err = -EINVAL;
1121                                 goto free_filter;
1122                         }
1123                         ether_addr_copy(filter->sid.dst_mac, match.key->dst);
1124                         filter->sid.filtertype = STREAMID_TYPE_NULL;
1125                 }
1126
1127                 if (!is_zero_ether_addr(match.mask->src)) {
1128                         if (!is_broadcast_ether_addr(match.mask->src)) {
1129                                 NL_SET_ERR_MSG_MOD(extack,
1130                                                    "Masked matching on source MAC not supported");
1131                                 err = -EINVAL;
1132                                 goto free_filter;
1133                         }
1134                         ether_addr_copy(filter->sid.src_mac, match.key->src);
1135                         filter->sid.filtertype = STREAMID_TYPE_SMAC;
1136                 }
1137         } else {
1138                 NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
1139                 err = -EINVAL;
1140                 goto free_filter;
1141         }
1142
1143         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
1144                 struct flow_match_vlan match;
1145
1146                 flow_rule_match_vlan(rule, &match);
1147                 if (match.mask->vlan_priority) {
1148                         if (match.mask->vlan_priority !=
1149                             (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
1150                                 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
1151                                 err = -EINVAL;
1152                                 goto free_filter;
1153                         }
1154                 }
1155
1156                 if (match.mask->vlan_id) {
1157                         if (match.mask->vlan_id != VLAN_VID_MASK) {
1158                                 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
1159                                 err = -EINVAL;
1160                                 goto free_filter;
1161                         }
1162
1163                         filter->sid.vid = match.key->vlan_id;
1164                         if (!filter->sid.vid)
1165                                 filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
1166                         else
1167                                 filter->sid.tagged = STREAMID_VLAN_TAGGED;
1168                 }
1169         } else {
1170                 filter->sid.tagged = STREAMID_VLAN_ALL;
1171         }
1172
1173         /* parsing gate action */
1174         if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
1175                 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1176                 err = -ENOSPC;
1177                 goto free_filter;
1178         }
1179
1180         if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
1181                 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1182                 err = -ENOSPC;
1183                 goto free_filter;
1184         }
1185
1186         entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
1187         sgi = kzalloc(entries_size, GFP_KERNEL);
1188         if (!sgi) {
1189                 err = -ENOMEM;
1190                 goto free_filter;
1191         }
1192
1193         refcount_set(&sgi->refcount, 1);
1194         sgi->index = entryg->hw_index;
1195         sgi->init_ipv = entryg->gate.prio;
1196         sgi->basetime = entryg->gate.basetime;
1197         sgi->cycletime = entryg->gate.cycletime;
1198         sgi->num_entries = entryg->gate.num_entries;
1199
1200         e = sgi->entries;
1201         for (i = 0; i < entryg->gate.num_entries; i++) {
1202                 e[i].gate_state = entryg->gate.entries[i].gate_state;
1203                 e[i].interval = entryg->gate.entries[i].interval;
1204                 e[i].ipv = entryg->gate.entries[i].ipv;
1205                 e[i].maxoctets = entryg->gate.entries[i].maxoctets;
1206         }
1207
1208         filter->sgi_index = sgi->index;
1209
1210         sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
1211         if (!sfi) {
1212                 err = -ENOMEM;
1213                 goto free_gate;
1214         }
1215
1216         refcount_set(&sfi->refcount, 1);
1217         sfi->gate_id = sgi->index;
1218         sfi->meter_id = ENETC_PSFP_WILDCARD;
1219
1220         /* Flow meter and max frame size */
1221         if (entryp) {
1222                 err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
1223                 if (err)
1224                         goto free_sfi;
1225
1226                 if (entryp->police.burst) {
1227                         fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
1228                         if (!fmi) {
1229                                 err = -ENOMEM;
1230                                 goto free_sfi;
1231                         }
1232                         refcount_set(&fmi->refcount, 1);
1233                         fmi->cir = entryp->police.rate_bytes_ps;
1234                         fmi->cbs = entryp->police.burst;
1235                         fmi->index = entryp->hw_index;
1236                         filter->flags |= ENETC_PSFP_FLAGS_FMI;
1237                         filter->fmi_index = fmi->index;
1238                         sfi->meter_id = fmi->index;
1239                 }
1240
1241                 if (entryp->police.mtu)
1242                         sfi->maxsdu = entryp->police.mtu;
1243         }
1244
1245         /* prio ref the filter prio */
1246         if (f->common.prio && f->common.prio <= BIT(3))
1247                 sfi->prio = f->common.prio - 1;
1248         else
1249                 sfi->prio = ENETC_PSFP_WILDCARD;
1250
1251         old_sfi = enetc_psfp_check_sfi(sfi);
1252         if (!old_sfi) {
1253                 int index;
1254
1255                 index = enetc_get_free_index(priv);
1256                 if (sfi->handle < 0) {
1257                         NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
1258                         err = -ENOSPC;
1259                         goto free_fmi;
1260                 }
1261
1262                 sfi->index = index;
1263                 sfi->handle = index + HANDLE_OFFSET;
1264                 /* Update the stream filter handle also */
1265                 filter->sid.handle = sfi->handle;
1266                 filter->sfi_index = sfi->index;
1267                 sfi_overwrite = 0;
1268         } else {
1269                 filter->sfi_index = old_sfi->index;
1270                 filter->sid.handle = old_sfi->handle;
1271                 sfi_overwrite = 1;
1272         }
1273
1274         err = enetc_psfp_hw_set(priv, &filter->sid,
1275                                 sfi_overwrite ? NULL : sfi, sgi, fmi);
1276         if (err)
1277                 goto free_fmi;
1278
1279         spin_lock(&epsfp.psfp_lock);
1280         if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
1281                 old_fmi = enetc_get_meter_by_index(filter->fmi_index);
1282                 if (old_fmi) {
1283                         fmi->refcount = old_fmi->refcount;
1284                         refcount_set(&fmi->refcount,
1285                                      refcount_read(&old_fmi->refcount) + 1);
1286                         hlist_del(&old_fmi->node);
1287                         kfree(old_fmi);
1288                 }
1289                 hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
1290         }
1291
1292         /* Remove the old node if exist and update with a new node */
1293         old_sgi = enetc_get_gate_by_index(filter->sgi_index);
1294         if (old_sgi) {
1295                 refcount_set(&sgi->refcount,
1296                              refcount_read(&old_sgi->refcount) + 1);
1297                 hlist_del(&old_sgi->node);
1298                 kfree(old_sgi);
1299         }
1300
1301         hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
1302
1303         if (!old_sfi) {
1304                 hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
1305                 set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
1306         } else {
1307                 kfree(sfi);
1308                 refcount_inc(&old_sfi->refcount);
1309         }
1310
1311         old_filter = enetc_get_stream_by_index(filter->sid.index);
1312         if (old_filter)
1313                 remove_one_chain(priv, old_filter);
1314
1315         filter->stats.lastused = jiffies;
1316         hlist_add_head(&filter->node, &epsfp.stream_list);
1317
1318         spin_unlock(&epsfp.psfp_lock);
1319
1320         return 0;
1321
1322 free_fmi:
1323         kfree(fmi);
1324 free_sfi:
1325         kfree(sfi);
1326 free_gate:
1327         kfree(sgi);
1328 free_filter:
1329         kfree(filter);
1330
1331         return err;
1332 }
1333
1334 static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
1335                                   struct flow_cls_offload *cls_flower)
1336 {
1337         struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
1338         struct netlink_ext_ack *extack = cls_flower->common.extack;
1339         struct flow_dissector *dissector = rule->match.dissector;
1340         struct flow_action *action = &rule->action;
1341         struct flow_action_entry *entry;
1342         struct actions_fwd *fwd;
1343         u64 actions = 0;
1344         int i, err;
1345
1346         if (!flow_action_has_entries(action)) {
1347                 NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
1348                 return -EINVAL;
1349         }
1350
1351         flow_action_for_each(i, entry, action)
1352                 actions |= BIT(entry->id);
1353
1354         fwd = enetc_check_flow_actions(actions, dissector->used_keys);
1355         if (!fwd) {
1356                 NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
1357                 return -EOPNOTSUPP;
1358         }
1359
1360         if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
1361                 err = enetc_psfp_parse_clsflower(priv, cls_flower);
1362                 if (err) {
1363                         NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
1364                         return err;
1365                 }
1366         } else {
1367                 NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
1368                 return -EOPNOTSUPP;
1369         }
1370
1371         return 0;
1372 }
1373
1374 static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
1375                                         struct flow_cls_offload *f)
1376 {
1377         struct enetc_stream_filter *filter;
1378         struct netlink_ext_ack *extack = f->common.extack;
1379         int err;
1380
1381         if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1382                 NL_SET_ERR_MSG_MOD(extack, "No Stream identify resource!");
1383                 return -ENOSPC;
1384         }
1385
1386         filter = enetc_get_stream_by_index(f->common.chain_index);
1387         if (!filter)
1388                 return -EINVAL;
1389
1390         err = enetc_streamid_hw_set(priv, &filter->sid, false);
1391         if (err)
1392                 return err;
1393
1394         remove_one_chain(priv, filter);
1395
1396         return 0;
1397 }
1398
/* Every offloaded clsflower rule on this device is a PSFP chain, so
 * destroying one simply forwards to the PSFP-specific teardown.
 */
static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
				   struct flow_cls_offload *f)
{
	return enetc_psfp_destroy_clsflower(priv, f);
}
1404
/* Report flow statistics for the stream on f->common.chain_index.
 * The counters read from hardware are cumulative, so the delta against
 * the running totals cached in filter->stats is computed under
 * psfp_lock and only that delta is handed to flow_stats_update().
 */
static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
				struct flow_cls_offload *f)
{
	struct psfp_streamfilter_counters counters = {};
	struct enetc_stream_filter *filter;
	struct flow_stats stats = {};
	int err;

	filter = enetc_get_stream_by_index(f->common.chain_index);
	if (!filter)
		return -EINVAL;

	err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
	if (err)
		return -EINVAL;

	spin_lock(&epsfp.psfp_lock);
	/* New packets since last report */
	stats.pkts = counters.matching_frames_count +
		     counters.not_passing_sdu_count -
		     filter->stats.pkts;
	/* New drops: sum of the not-passing and red-frame counters */
	stats.drops = counters.not_passing_frames_count +
		      counters.not_passing_sdu_count +
		      counters.red_frames_count -
		      filter->stats.drops;
	stats.lastused = filter->stats.lastused;
	/* Fold the delta back into the cached running totals */
	filter->stats.pkts += stats.pkts;
	filter->stats.drops += stats.drops;
	spin_unlock(&epsfp.psfp_lock);

	flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
			  stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);

	return 0;
}
1439
1440 static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
1441                                      struct flow_cls_offload *cls_flower)
1442 {
1443         switch (cls_flower->command) {
1444         case FLOW_CLS_REPLACE:
1445                 return enetc_config_clsflower(priv, cls_flower);
1446         case FLOW_CLS_DESTROY:
1447                 return enetc_destroy_clsflower(priv, cls_flower);
1448         case FLOW_CLS_STATS:
1449                 return enetc_psfp_get_stats(priv, cls_flower);
1450         default:
1451                 return -EOPNOTSUPP;
1452         }
1453 }
1454
1455 static inline void clean_psfp_sfi_bitmap(void)
1456 {
1457         bitmap_free(epsfp.psfp_sfi_bitmap);
1458         epsfp.psfp_sfi_bitmap = NULL;
1459 }
1460
1461 static void clean_stream_list(void)
1462 {
1463         struct enetc_stream_filter *s;
1464         struct hlist_node *tmp;
1465
1466         hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
1467                 hlist_del(&s->node);
1468                 kfree(s);
1469         }
1470 }
1471
1472 static void clean_sfi_list(void)
1473 {
1474         struct enetc_psfp_filter *sfi;
1475         struct hlist_node *tmp;
1476
1477         hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
1478                 hlist_del(&sfi->node);
1479                 kfree(sfi);
1480         }
1481 }
1482
1483 static void clean_sgi_list(void)
1484 {
1485         struct enetc_psfp_gate *sgi;
1486         struct hlist_node *tmp;
1487
1488         hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
1489                 hlist_del(&sgi->node);
1490                 kfree(sgi);
1491         }
1492 }
1493
/* Drop all PSFP software state at once: free every filter, gate and
 * stream node, clear the per-port owner bitmap and release the stream
 * filter index bitmap. Called when the last flow block unbinds and
 * from enetc_psfp_clean().
 */
static void clean_psfp_all(void)
{
	/* Disable all list nodes and free all memory */
	clean_sfi_list();
	clean_sgi_list();
	clean_stream_list();
	epsfp.dev_bitmap = 0;
	clean_psfp_sfi_bitmap();
}
1503
1504 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1505                             void *cb_priv)
1506 {
1507         struct net_device *ndev = cb_priv;
1508
1509         if (!tc_can_offload(ndev))
1510                 return -EOPNOTSUPP;
1511
1512         switch (type) {
1513         case TC_SETUP_CLSFLOWER:
1514                 return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
1515         default:
1516                 return -EOPNOTSUPP;
1517         }
1518 }
1519
1520 int enetc_set_psfp(struct net_device *ndev, bool en)
1521 {
1522         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1523         int err;
1524
1525         if (en) {
1526                 err = enetc_psfp_enable(priv);
1527                 if (err)
1528                         return err;
1529
1530                 priv->active_offloads |= ENETC_F_QCI;
1531                 return 0;
1532         }
1533
1534         err = enetc_psfp_disable(priv);
1535         if (err)
1536                 return err;
1537
1538         priv->active_offloads &= ~ENETC_F_QCI;
1539
1540         return 0;
1541 }
1542
/* One-time initialization of the global PSFP bookkeeping (the epsfp
 * state shared across ports): allocate the stream filter index bitmap
 * sized by the hardware capability and init the lock. Subsequent calls
 * return 0 without reallocating.
 */
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
	/* Already initialized by another port */
	if (epsfp.psfp_sfi_bitmap)
		return 0;

	epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
					      GFP_KERNEL);
	if (!epsfp.psfp_sfi_bitmap)
		return -ENOMEM;

	spin_lock_init(&epsfp.psfp_lock);

	/* No flow blocks bound yet: no port owns any PSFP state */
	if (list_empty(&enetc_block_cb_list))
		epsfp.dev_bitmap = 0;

	return 0;
}
1560
/* Release all global PSFP state. Refuses with -EBUSY while any flow
 * block callback is still registered on enetc_block_cb_list.
 */
int enetc_psfp_clean(struct enetc_ndev_priv *priv)
{
	if (!list_empty(&enetc_block_cb_list))
		return -EBUSY;

	clean_psfp_all();

	return 0;
}
1570
1571 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
1572 {
1573         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1574         struct flow_block_offload *f = type_data;
1575         int port, err;
1576
1577         err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
1578                                          enetc_setup_tc_block_cb,
1579                                          ndev, ndev, true);
1580         if (err)
1581                 return err;
1582
1583         switch (f->command) {
1584         case FLOW_BLOCK_BIND:
1585                 port = enetc_pf_to_port(priv->si->pdev);
1586                 if (port < 0)
1587                         return -EINVAL;
1588
1589                 set_bit(port, &epsfp.dev_bitmap);
1590                 break;
1591         case FLOW_BLOCK_UNBIND:
1592                 port = enetc_pf_to_port(priv->si->pdev);
1593                 if (port < 0)
1594                         return -EINVAL;
1595
1596                 clear_bit(port, &epsfp.dev_bitmap);
1597                 if (!epsfp.dev_bitmap)
1598                         clean_psfp_all();
1599                 break;
1600         }
1601
1602         return 0;
1603 }