[linux-stable] / drivers / nvme / target / tcp.c
nvmet-tcp: fix unhandled tcp states in nvmet_tcp_state_change()
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP target.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
12 #include <net/sock.h>
13 #include <net/tcp.h>
14 #include <linux/inet.h>
15 #include <linux/llist.h>
16 #include <crypto/hash.h>
17
18 #include "nvmet.h"
19
20 #define NVMET_TCP_DEF_INLINE_DATA_SIZE  (4 * PAGE_SIZE)
21
22 /* Define the socket priority to use for connections where it is desirable
23  * that the NIC consider performing optimized packet processing or filtering.
24  * A non-zero value is sufficient to indicate general consideration of any
25  * possible optimization.  Making it a module param allows for alternative
26  * values that may be unique for some NIC implementations.
27  */
28 static int so_priority;
29 module_param(so_priority, int, 0644);
30 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
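/*
 * Example: loading the module with a non-zero value, e.g.
 * "modprobe nvmet-tcp so_priority=6", makes nvmet_tcp_set_queue_sock()
 * below apply that priority to every accepted queue socket via
 * sock_set_priority().
 */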
31
32 /* Define a time period (in usecs) over which io_work() shall sample an activated
33  * queue before determining it to be idle.  This optional module behavior
34  * can enable NIC solutions that support socket optimized packet processing
35  * using advanced interrupt moderation techniques.
36  */
37 static int idle_poll_period_usecs;
38 module_param(idle_poll_period_usecs, int, 0644);
39 MODULE_PARM_DESC(idle_poll_period_usecs,
40                 "nvmet tcp io_work poll till idle time period in usecs");
41
42 #define NVMET_TCP_RECV_BUDGET           8
43 #define NVMET_TCP_SEND_BUDGET           8
44 #define NVMET_TCP_IO_WORK_BUDGET        64
45
46 enum nvmet_tcp_send_state {
47         NVMET_TCP_SEND_DATA_PDU,
48         NVMET_TCP_SEND_DATA,
49         NVMET_TCP_SEND_R2T,
50         NVMET_TCP_SEND_DDGST,
51         NVMET_TCP_SEND_RESPONSE
52 };
53
54 enum nvmet_tcp_recv_state {
55         NVMET_TCP_RECV_PDU,
56         NVMET_TCP_RECV_DATA,
57         NVMET_TCP_RECV_DDGST,
58         NVMET_TCP_RECV_ERR,
59 };
60
61 enum {
62         NVMET_TCP_F_INIT_FAILED = (1 << 0),
63 };
64
65 struct nvmet_tcp_cmd {
66         struct nvmet_tcp_queue          *queue;
67         struct nvmet_req                req;
68
69         struct nvme_tcp_cmd_pdu         *cmd_pdu;
70         struct nvme_tcp_rsp_pdu         *rsp_pdu;
71         struct nvme_tcp_data_pdu        *data_pdu;
72         struct nvme_tcp_r2t_pdu         *r2t_pdu;
73
74         u32                             rbytes_done;
75         u32                             wbytes_done;
76
77         u32                             pdu_len;
78         u32                             pdu_recv;
79         int                             sg_idx;
80         int                             nr_mapped;
81         struct msghdr                   recv_msg;
82         struct kvec                     *iov;
83         u32                             flags;
84
85         struct list_head                entry;
86         struct llist_node               lentry;
87
88         /* send state */
89         u32                             offset;
90         struct scatterlist              *cur_sg;
91         enum nvmet_tcp_send_state       state;
92
93         __le32                          exp_ddgst;
94         __le32                          recv_ddgst;
95 };
96
97 enum nvmet_tcp_queue_state {
98         NVMET_TCP_Q_CONNECTING,
99         NVMET_TCP_Q_LIVE,
100         NVMET_TCP_Q_DISCONNECTING,
101 };
102
103 struct nvmet_tcp_queue {
104         struct socket           *sock;
105         struct nvmet_tcp_port   *port;
106         struct work_struct      io_work;
107         struct nvmet_cq         nvme_cq;
108         struct nvmet_sq         nvme_sq;
109
110         /* send state */
111         struct nvmet_tcp_cmd    *cmds;
112         unsigned int            nr_cmds;
113         struct list_head        free_list;
114         struct llist_head       resp_list;
115         struct list_head        resp_send_list;
116         int                     send_list_len;
117         struct nvmet_tcp_cmd    *snd_cmd;
118
119         /* recv state */
120         int                     offset;
121         int                     left;
122         enum nvmet_tcp_recv_state rcv_state;
123         struct nvmet_tcp_cmd    *cmd;
124         union nvme_tcp_pdu      pdu;
125
126         /* digest state */
127         bool                    hdr_digest;
128         bool                    data_digest;
129         struct ahash_request    *snd_hash;
130         struct ahash_request    *rcv_hash;
131
132         unsigned long           poll_end;
133
134         spinlock_t              state_lock;
135         enum nvmet_tcp_queue_state state;
136
137         struct sockaddr_storage sockaddr;
138         struct sockaddr_storage sockaddr_peer;
139         struct work_struct      release_work;
140
141         int                     idx;
142         struct list_head        queue_list;
143
144         struct nvmet_tcp_cmd    connect;
145
146         struct page_frag_cache  pf_cache;
147
148         void (*data_ready)(struct sock *);
149         void (*state_change)(struct sock *);
150         void (*write_space)(struct sock *);
151 };
152
153 struct nvmet_tcp_port {
154         struct socket           *sock;
155         struct work_struct      accept_work;
156         struct nvmet_port       *nport;
157         struct sockaddr_storage addr;
158         void (*data_ready)(struct sock *);
159 };
160
161 static DEFINE_IDA(nvmet_tcp_queue_ida);
162 static LIST_HEAD(nvmet_tcp_queue_list);
163 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
164
165 static struct workqueue_struct *nvmet_tcp_wq;
166 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
167 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
168 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
169
170 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
171                 struct nvmet_tcp_cmd *cmd)
172 {
173         if (unlikely(!queue->nr_cmds)) {
174                 /* We didn't allocate cmds yet, send 0xffff */
175                 return USHRT_MAX;
176         }
177
178         return cmd - queue->cmds;
179 }
180
181 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
182 {
183         return nvme_is_write(cmd->req.cmd) &&
184                 cmd->rbytes_done < cmd->req.transfer_len;
185 }
186
187 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
188 {
189         return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
190 }
191
192 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
193 {
194         return !nvme_is_write(cmd->req.cmd) &&
195                 cmd->req.transfer_len > 0 &&
196                 !cmd->req.cqe->status;
197 }
198
199 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
200 {
201         return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
202                 !cmd->rbytes_done;
203 }
204
205 static inline struct nvmet_tcp_cmd *
206 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
207 {
208         struct nvmet_tcp_cmd *cmd;
209
210         cmd = list_first_entry_or_null(&queue->free_list,
211                                 struct nvmet_tcp_cmd, entry);
212         if (!cmd)
213                 return NULL;
214         list_del_init(&cmd->entry);
215
216         cmd->rbytes_done = cmd->wbytes_done = 0;
217         cmd->pdu_len = 0;
218         cmd->pdu_recv = 0;
219         cmd->iov = NULL;
220         cmd->flags = 0;
221         return cmd;
222 }
223
224 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
225 {
226         if (unlikely(cmd == &cmd->queue->connect))
227                 return;
228
229         list_add_tail(&cmd->entry, &cmd->queue->free_list);
230 }
231
232 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
233 {
234         return queue->sock->sk->sk_incoming_cpu;
235 }
236
237 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
238 {
239         return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
240 }
241
242 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
243 {
244         return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
245 }
246
247 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
248                 void *pdu, size_t len)
249 {
250         struct scatterlist sg;
251
252         sg_init_one(&sg, pdu, len);
253         ahash_request_set_crypt(hash, &sg, pdu + len, len);
254         crypto_ahash_digest(hash);
255 }
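/*
 * The computed digest is written directly after the covered bytes, at
 * pdu + len.  nvmet_tcp_verify_hdgst() below relies on this: it saves
 * the received digest first, recomputes it in place, then compares the
 * two values.
 */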
256
257 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
258         void *pdu, size_t len)
259 {
260         struct nvme_tcp_hdr *hdr = pdu;
261         __le32 recv_digest;
262         __le32 exp_digest;
263
264         if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
265                 pr_err("queue %d: header digest enabled but no header digest\n",
266                         queue->idx);
267                 return -EPROTO;
268         }
269
270         recv_digest = *(__le32 *)(pdu + hdr->hlen);
271         nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
272         exp_digest = *(__le32 *)(pdu + hdr->hlen);
273         if (recv_digest != exp_digest) {
274                 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
275                         queue->idx, le32_to_cpu(recv_digest),
276                         le32_to_cpu(exp_digest));
277                 return -EPROTO;
278         }
279
280         return 0;
281 }
282
283 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
284 {
285         struct nvme_tcp_hdr *hdr = pdu;
286         u8 digest_len = nvmet_tcp_hdgst_len(queue);
287         u32 len;
288
289         len = le32_to_cpu(hdr->plen) - hdr->hlen -
290                 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
291
292         if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
293                 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
294                 return -EPROTO;
295         }
296
297         return 0;
298 }
299
300 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
301 {
302         struct scatterlist *sg;
303         int i;
304
305         sg = &cmd->req.sg[cmd->sg_idx];
306
307         for (i = 0; i < cmd->nr_mapped; i++)
308                 kunmap(sg_page(&sg[i]));
309 }
310
311 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
312 {
313         struct kvec *iov = cmd->iov;
314         struct scatterlist *sg;
315         u32 length, offset, sg_offset;
316
317         length = cmd->pdu_len;
318         cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
319         offset = cmd->rbytes_done;
320         cmd->sg_idx = offset / PAGE_SIZE;
321         sg_offset = offset % PAGE_SIZE;
322         sg = &cmd->req.sg[cmd->sg_idx];
323
324         while (length) {
325                 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
326
327                 iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
328                 iov->iov_len = iov_len;
329
330                 length -= iov_len;
331                 sg = sg_next(sg);
332                 iov++;
333                 sg_offset = 0;
334         }
335
336         iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
337                 cmd->nr_mapped, cmd->pdu_len);
338 }
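/*
 * Host-to-controller data is received straight into the command's
 * scatterlist: each sg page is kmap()ed into cmd->iov and fed to
 * sock_recvmsg() through a kvec iterator; nvmet_tcp_unmap_pdu_iovec()
 * drops the mappings once the PDU data has been consumed.
 */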
339
340 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
341 {
342         queue->rcv_state = NVMET_TCP_RECV_ERR;
343         if (queue->nvme_sq.ctrl)
344                 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
345         else
346                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
347 }
348
349 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
350 {
351         if (status == -EPIPE || status == -ECONNRESET)
352                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
353         else
354                 nvmet_tcp_fatal_error(queue);
355 }
356
357 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
358 {
359         struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
360         u32 len = le32_to_cpu(sgl->length);
361
362         if (!len)
363                 return 0;
364
365         if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
366                           NVME_SGL_FMT_OFFSET)) {
367                 if (!nvme_is_write(cmd->req.cmd))
368                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
369
370                 if (len > cmd->req.port->inline_data_size)
371                         return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
372                 cmd->pdu_len = len;
373         }
374         cmd->req.transfer_len += len;
375
376         cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
377         if (!cmd->req.sg)
378                 return NVME_SC_INTERNAL;
379         cmd->cur_sg = cmd->req.sg;
380
381         if (nvmet_tcp_has_data_in(cmd)) {
382                 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
383                                 sizeof(*cmd->iov), GFP_KERNEL);
384                 if (!cmd->iov)
385                         goto err;
386         }
387
388         return 0;
389 err:
390         sgl_free(cmd->req.sg);
391         return NVME_SC_INTERNAL;
392 }
393
394 static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
395                 struct nvmet_tcp_cmd *cmd)
396 {
397         ahash_request_set_crypt(hash, cmd->req.sg,
398                 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
399         crypto_ahash_digest(hash);
400 }
401
402 static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
403                 struct nvmet_tcp_cmd *cmd)
404 {
405         struct scatterlist sg;
406         struct kvec *iov;
407         int i;
408
409         crypto_ahash_init(hash);
410         for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
411                 sg_init_one(&sg, iov->iov_base, iov->iov_len);
412                 ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
413                 crypto_ahash_update(hash);
414         }
415         ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
416         crypto_ahash_final(hash);
417 }
418
419 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
420 {
421         struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
422         struct nvmet_tcp_queue *queue = cmd->queue;
423         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
424         u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
425
426         cmd->offset = 0;
427         cmd->state = NVMET_TCP_SEND_DATA_PDU;
428
429         pdu->hdr.type = nvme_tcp_c2h_data;
430         pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
431                                                 NVME_TCP_F_DATA_SUCCESS : 0);
432         pdu->hdr.hlen = sizeof(*pdu);
433         pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
434         pdu->hdr.plen =
435                 cpu_to_le32(pdu->hdr.hlen + hdgst +
436                                 cmd->req.transfer_len + ddgst);
437         pdu->command_id = cmd->req.cqe->command_id;
438         pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
439         pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
440
441         if (queue->data_digest) {
442                 pdu->hdr.flags |= NVME_TCP_F_DDGST;
443                 nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
444         }
445
446         if (cmd->queue->hdr_digest) {
447                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
448                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
449         }
450 }
451
452 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
453 {
454         struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
455         struct nvmet_tcp_queue *queue = cmd->queue;
456         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
457
458         cmd->offset = 0;
459         cmd->state = NVMET_TCP_SEND_R2T;
460
461         pdu->hdr.type = nvme_tcp_r2t;
462         pdu->hdr.flags = 0;
463         pdu->hdr.hlen = sizeof(*pdu);
464         pdu->hdr.pdo = 0;
465         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
466
467         pdu->command_id = cmd->req.cmd->common.command_id;
468         pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
469         pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
470         pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
471         if (cmd->queue->hdr_digest) {
472                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
473                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
474         }
475 }
476
477 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
478 {
479         struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
480         struct nvmet_tcp_queue *queue = cmd->queue;
481         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
482
483         cmd->offset = 0;
484         cmd->state = NVMET_TCP_SEND_RESPONSE;
485
486         pdu->hdr.type = nvme_tcp_rsp;
487         pdu->hdr.flags = 0;
488         pdu->hdr.hlen = sizeof(*pdu);
489         pdu->hdr.pdo = 0;
490         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
491         if (cmd->queue->hdr_digest) {
492                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
493                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
494         }
495 }
496
497 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
498 {
499         struct llist_node *node;
500         struct nvmet_tcp_cmd *cmd;
501
502         for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
503                 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
504                 list_add(&cmd->entry, &queue->resp_send_list);
505                 queue->send_list_len++;
506         }
507 }
508
509 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
510 {
511         queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
512                                 struct nvmet_tcp_cmd, entry);
513         if (!queue->snd_cmd) {
514                 nvmet_tcp_process_resp_list(queue);
515                 queue->snd_cmd =
516                         list_first_entry_or_null(&queue->resp_send_list,
517                                         struct nvmet_tcp_cmd, entry);
518                 if (unlikely(!queue->snd_cmd))
519                         return NULL;
520         }
521
522         list_del_init(&queue->snd_cmd->entry);
523         queue->send_list_len--;
524
525         if (nvmet_tcp_need_data_out(queue->snd_cmd))
526                 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
527         else if (nvmet_tcp_need_data_in(queue->snd_cmd))
528                 nvmet_setup_r2t_pdu(queue->snd_cmd);
529         else
530                 nvmet_setup_response_pdu(queue->snd_cmd);
531
532         return queue->snd_cmd;
533 }
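/*
 * A fetched command enters exactly one of three send flows: a C2H data
 * PDU (read data), an R2T PDU (soliciting host write data), or a plain
 * response capsule.  nvmet_tcp_try_send_one() then advances the
 * per-command send state machine until the chosen PDU is fully sent.
 */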
534
535 static void nvmet_tcp_queue_response(struct nvmet_req *req)
536 {
537         struct nvmet_tcp_cmd *cmd =
538                 container_of(req, struct nvmet_tcp_cmd, req);
539         struct nvmet_tcp_queue  *queue = cmd->queue;
540         struct nvme_sgl_desc *sgl;
541         u32 len;
542
543         if (unlikely(cmd == queue->cmd)) {
544                 sgl = &cmd->req.cmd->common.dptr.sgl;
545                 len = le32_to_cpu(sgl->length);
546
547                 /*
548                  * Wait for inline data before processing the response.
549                  * Avoid using helpers, as this might happen before
550                  * nvmet_req_init is completed.
551                  */
552                 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
553                     len && len <= cmd->req.port->inline_data_size &&
554                     nvme_is_write(cmd->req.cmd))
555                         return;
556         }
557
558         llist_add(&cmd->lentry, &queue->resp_list);
559         queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
560 }
561
562 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
563 {
564         if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
565                 nvmet_tcp_queue_response(&cmd->req);
566         else
567                 cmd->req.execute(&cmd->req);
568 }
569
570 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
571 {
572         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
573         int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
574         int ret;
575
576         ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
577                         offset_in_page(cmd->data_pdu) + cmd->offset,
578                         left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
579         if (ret <= 0)
580                 return ret;
581
582         cmd->offset += ret;
583         left -= ret;
584
585         if (left)
586                 return -EAGAIN;
587
588         cmd->state = NVMET_TCP_SEND_DATA;
589         cmd->offset  = 0;
590         return 1;
591 }
592
593 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
594 {
595         struct nvmet_tcp_queue *queue = cmd->queue;
596         int ret;
597
598         while (cmd->cur_sg) {
599                 struct page *page = sg_page(cmd->cur_sg);
600                 u32 left = cmd->cur_sg->length - cmd->offset;
601                 int flags = MSG_DONTWAIT;
602
603                 if ((!last_in_batch && cmd->queue->send_list_len) ||
604                     cmd->wbytes_done + left < cmd->req.transfer_len ||
605                     queue->data_digest || !queue->nvme_sq.sqhd_disabled)
606                         flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
607
608                 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
609                                         left, flags);
610                 if (ret <= 0)
611                         return ret;
612
613                 cmd->offset += ret;
614                 cmd->wbytes_done += ret;
615
616                 /* Done with sg? */
617                 if (cmd->offset == cmd->cur_sg->length) {
618                         cmd->cur_sg = sg_next(cmd->cur_sg);
619                         cmd->offset = 0;
620                 }
621         }
622
623         if (queue->data_digest) {
624                 cmd->state = NVMET_TCP_SEND_DDGST;
625                 cmd->offset = 0;
626         } else {
627                 if (queue->nvme_sq.sqhd_disabled) {
628                         cmd->queue->snd_cmd = NULL;
629                         nvmet_tcp_put_cmd(cmd);
630                 } else {
631                         nvmet_setup_response_pdu(cmd);
632                 }
633         }
634
635         if (queue->nvme_sq.sqhd_disabled) {
636                 kfree(cmd->iov);
637                 sgl_free(cmd->req.sg);
638         }
639
640         return 1;
641
642 }
643
644 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
645                 bool last_in_batch)
646 {
647         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
648         int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
649         int flags = MSG_DONTWAIT;
650         int ret;
651
652         if (!last_in_batch && cmd->queue->send_list_len)
653                 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
654         else
655                 flags |= MSG_EOR;
656
657         ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
658                 offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
659         if (ret <= 0)
660                 return ret;
661         cmd->offset += ret;
662         left -= ret;
663
664         if (left)
665                 return -EAGAIN;
666
667         kfree(cmd->iov);
668         sgl_free(cmd->req.sg);
669         cmd->queue->snd_cmd = NULL;
670         nvmet_tcp_put_cmd(cmd);
671         return 1;
672 }
673
674 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
675 {
676         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
677         int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
678         int flags = MSG_DONTWAIT;
679         int ret;
680
681         if (!last_in_batch && cmd->queue->send_list_len)
682                 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
683         else
684                 flags |= MSG_EOR;
685
686         ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
687                 offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
688         if (ret <= 0)
689                 return ret;
690         cmd->offset += ret;
691         left -= ret;
692
693         if (left)
694                 return -EAGAIN;
695
696         cmd->queue->snd_cmd = NULL;
697         return 1;
698 }
699
700 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
701 {
702         struct nvmet_tcp_queue *queue = cmd->queue;
703         int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
704         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
705         struct kvec iov = {
706                 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
707                 .iov_len = left
708         };
709         int ret;
710
711         if (!last_in_batch && cmd->queue->send_list_len)
712                 msg.msg_flags |= MSG_MORE;
713         else
714                 msg.msg_flags |= MSG_EOR;
715
716         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
717         if (unlikely(ret <= 0))
718                 return ret;
719
720         cmd->offset += ret;
721         left -= ret;
722
723         if (left)
724                 return -EAGAIN;
725
726         if (queue->nvme_sq.sqhd_disabled) {
727                 cmd->queue->snd_cmd = NULL;
728                 nvmet_tcp_put_cmd(cmd);
729         } else {
730                 nvmet_setup_response_pdu(cmd);
731         }
732         return 1;
733 }
734
735 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
736                 bool last_in_batch)
737 {
738         struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
739         int ret = 0;
740
741         if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
742                 cmd = nvmet_tcp_fetch_cmd(queue);
743                 if (unlikely(!cmd))
744                         return 0;
745         }
746
747         if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
748                 ret = nvmet_try_send_data_pdu(cmd);
749                 if (ret <= 0)
750                         goto done_send;
751         }
752
753         if (cmd->state == NVMET_TCP_SEND_DATA) {
754                 ret = nvmet_try_send_data(cmd, last_in_batch);
755                 if (ret <= 0)
756                         goto done_send;
757         }
758
759         if (cmd->state == NVMET_TCP_SEND_DDGST) {
760                 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
761                 if (ret <= 0)
762                         goto done_send;
763         }
764
765         if (cmd->state == NVMET_TCP_SEND_R2T) {
766                 ret = nvmet_try_send_r2t(cmd, last_in_batch);
767                 if (ret <= 0)
768                         goto done_send;
769         }
770
771         if (cmd->state == NVMET_TCP_SEND_RESPONSE)
772                 ret = nvmet_try_send_response(cmd, last_in_batch);
773
774 done_send:
775         if (ret < 0) {
776                 if (ret == -EAGAIN)
777                         return 0;
778                 return ret;
779         }
780
781         return 1;
782 }
783
784 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
785                 int budget, int *sends)
786 {
787         int i, ret = 0;
788
789         for (i = 0; i < budget; i++) {
790                 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
791                 if (unlikely(ret < 0)) {
792                         nvmet_tcp_socket_error(queue, ret);
793                         goto done;
794                 } else if (ret == 0) {
795                         break;
796                 }
797                 (*sends)++;
798         }
799 done:
800         return ret;
801 }
802
803 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
804 {
805         queue->offset = 0;
806         queue->left = sizeof(struct nvme_tcp_hdr);
807         queue->cmd = NULL;
808         queue->rcv_state = NVMET_TCP_RECV_PDU;
809 }
810
811 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
812 {
813         struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
814
815         ahash_request_free(queue->rcv_hash);
816         ahash_request_free(queue->snd_hash);
817         crypto_free_ahash(tfm);
818 }
819
820 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
821 {
822         struct crypto_ahash *tfm;
823
824         tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
825         if (IS_ERR(tfm))
826                 return PTR_ERR(tfm);
827
828         queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
829         if (!queue->snd_hash)
830                 goto free_tfm;
831         ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
832
833         queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
834         if (!queue->rcv_hash)
835                 goto free_snd_hash;
836         ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
837
838         return 0;
839 free_snd_hash:
840         ahash_request_free(queue->snd_hash);
841 free_tfm:
842         crypto_free_ahash(tfm);
843         return -ENOMEM;
844 }
845
846
847 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
848 {
849         struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
850         struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
851         struct msghdr msg = {};
852         struct kvec iov;
853         int ret;
854
855         if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
856                 pr_err("bad nvme-tcp pdu length (%d)\n",
857                         le32_to_cpu(icreq->hdr.plen));
858                 nvmet_tcp_fatal_error(queue);
859         }
860
861         if (icreq->pfv != NVME_TCP_PFV_1_0) {
862                 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
863                 return -EPROTO;
864         }
865
866         if (icreq->hpda != 0) {
867                 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
868                         icreq->hpda);
869                 return -EPROTO;
870         }
871
872         queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
873         queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
874         if (queue->hdr_digest || queue->data_digest) {
875                 ret = nvmet_tcp_alloc_crypto(queue);
876                 if (ret)
877                         return ret;
878         }
879
880         memset(icresp, 0, sizeof(*icresp));
881         icresp->hdr.type = nvme_tcp_icresp;
882         icresp->hdr.hlen = sizeof(*icresp);
883         icresp->hdr.pdo = 0;
884         icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
885         icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
886         icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
887         icresp->cpda = 0;
888         if (queue->hdr_digest)
889                 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
890         if (queue->data_digest)
891                 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
892
893         iov.iov_base = icresp;
894         iov.iov_len = sizeof(*icresp);
895         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
896         if (ret < 0)
897                 goto free_crypto;
898
899         queue->state = NVMET_TCP_Q_LIVE;
900         nvmet_prepare_receive_pdu(queue);
901         return 0;
902 free_crypto:
903         if (queue->hdr_digest || queue->data_digest)
904                 nvmet_tcp_free_crypto(queue);
905         return ret;
906 }
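/*
 * Connection establishment: the first PDU on a CONNECTING queue must be
 * an ICReq.  The target takes the header/data digest settings from
 * icreq->digest, allocates the crc32c ahash transforms if either digest
 * is enabled, sends an ICResp and only then marks the queue LIVE.
 */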
907
908 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
909                 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
910 {
911         size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
912         int ret;
913
914         if (!nvme_is_write(cmd->req.cmd) ||
915             data_len > cmd->req.port->inline_data_size) {
916                 nvmet_prepare_receive_pdu(queue);
917                 return;
918         }
919
920         ret = nvmet_tcp_map_data(cmd);
921         if (unlikely(ret)) {
922                 pr_err("queue %d: failed to map data\n", queue->idx);
923                 nvmet_tcp_fatal_error(queue);
924                 return;
925         }
926
927         queue->rcv_state = NVMET_TCP_RECV_DATA;
928         nvmet_tcp_map_pdu_iovec(cmd);
929         cmd->flags |= NVMET_TCP_F_INIT_FAILED;
930 }
931
932 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
933 {
934         struct nvme_tcp_data_pdu *data = &queue->pdu.data;
935         struct nvmet_tcp_cmd *cmd;
936
937         if (likely(queue->nr_cmds))
938                 cmd = &queue->cmds[data->ttag];
939         else
940                 cmd = &queue->connect;
941
942         if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
943                 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
944                         data->ttag, le32_to_cpu(data->data_offset),
945                         cmd->rbytes_done);
946                 /* FIXME: use path and transport errors */
947                 nvmet_req_complete(&cmd->req,
948                         NVME_SC_INVALID_FIELD | NVME_SC_DNR);
949                 return -EPROTO;
950         }
951
952         cmd->pdu_len = le32_to_cpu(data->data_length);
953         cmd->pdu_recv = 0;
954         nvmet_tcp_map_pdu_iovec(cmd);
955         queue->cmd = cmd;
956         queue->rcv_state = NVMET_TCP_RECV_DATA;
957
958         return 0;
959 }
960
961 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
962 {
963         struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
964         struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
965         struct nvmet_req *req;
966         int ret;
967
968         if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
969                 if (hdr->type != nvme_tcp_icreq) {
970                         pr_err("unexpected pdu type (%d) before icreq\n",
971                                 hdr->type);
972                         nvmet_tcp_fatal_error(queue);
973                         return -EPROTO;
974                 }
975                 return nvmet_tcp_handle_icreq(queue);
976         }
977
978         if (hdr->type == nvme_tcp_h2c_data) {
979                 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
980                 if (unlikely(ret))
981                         return ret;
982                 return 0;
983         }
984
985         queue->cmd = nvmet_tcp_get_cmd(queue);
986         if (unlikely(!queue->cmd)) {
987                 /* This should never happen */
988                 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
989                         queue->idx, queue->nr_cmds, queue->send_list_len,
990                         nvme_cmd->common.opcode);
991                 nvmet_tcp_fatal_error(queue);
992                 return -ENOMEM;
993         }
994
995         req = &queue->cmd->req;
996         memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
997
998         if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
999                         &queue->nvme_sq, &nvmet_tcp_ops))) {
1000                 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
1001                         req->cmd, req->cmd->common.command_id,
1002                         req->cmd->common.opcode,
1003                         le32_to_cpu(req->cmd->common.dptr.sgl.length));
1004
1005                 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1006                 return 0;
1007         }
1008
1009         ret = nvmet_tcp_map_data(queue->cmd);
1010         if (unlikely(ret)) {
1011                 pr_err("queue %d: failed to map data\n", queue->idx);
1012                 if (nvmet_tcp_has_inline_data(queue->cmd))
1013                         nvmet_tcp_fatal_error(queue);
1014                 else
1015                         nvmet_req_complete(req, ret);
1016                 ret = -EAGAIN;
1017                 goto out;
1018         }
1019
1020         if (nvmet_tcp_need_data_in(queue->cmd)) {
1021                 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1022                         queue->rcv_state = NVMET_TCP_RECV_DATA;
1023                         nvmet_tcp_map_pdu_iovec(queue->cmd);
1024                         return 0;
1025                 }
1026                 /* send back R2T */
1027                 nvmet_tcp_queue_response(&queue->cmd->req);
1028                 goto out;
1029         }
1030
1031         queue->cmd->req.execute(&queue->cmd->req);
1032 out:
1033         nvmet_prepare_receive_pdu(queue);
1034         return ret;
1035 }
1036
1037 static const u8 nvme_tcp_pdu_sizes[] = {
1038         [nvme_tcp_icreq]        = sizeof(struct nvme_tcp_icreq_pdu),
1039         [nvme_tcp_cmd]          = sizeof(struct nvme_tcp_cmd_pdu),
1040         [nvme_tcp_h2c_data]     = sizeof(struct nvme_tcp_data_pdu),
1041 };
1042
1043 static inline u8 nvmet_tcp_pdu_size(u8 type)
1044 {
1045         size_t idx = type;
1046
1047         return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1048                 nvme_tcp_pdu_sizes[idx]) ?
1049                         nvme_tcp_pdu_sizes[idx] : 0;
1050 }
1051
1052 static inline bool nvmet_tcp_pdu_valid(u8 type)
1053 {
1054         switch (type) {
1055         case nvme_tcp_icreq:
1056         case nvme_tcp_cmd:
1057         case nvme_tcp_h2c_data:
1058                 /* fallthru */
1059                 return true;
1060         }
1061
1062         return false;
1063 }
1064
1065 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1066 {
1067         struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1068         int len;
1069         struct kvec iov;
1070         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1071
1072 recv:
1073         iov.iov_base = (void *)&queue->pdu + queue->offset;
1074         iov.iov_len = queue->left;
1075         len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1076                         iov.iov_len, msg.msg_flags);
1077         if (unlikely(len < 0))
1078                 return len;
1079
1080         queue->offset += len;
1081         queue->left -= len;
1082         if (queue->left)
1083                 return -EAGAIN;
1084
1085         if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1086                 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1087
1088                 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1089                         pr_err("unexpected pdu type %d\n", hdr->type);
1090                         nvmet_tcp_fatal_error(queue);
1091                         return -EIO;
1092                 }
1093
1094                 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1095                         pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1096                         return -EIO;
1097                 }
1098
1099                 queue->left = hdr->hlen - queue->offset + hdgst;
1100                 goto recv;
1101         }
1102
1103         if (queue->hdr_digest &&
1104             nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1105                 nvmet_tcp_fatal_error(queue); /* fatal */
1106                 return -EPROTO;
1107         }
1108
1109         if (queue->data_digest &&
1110             nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1111                 nvmet_tcp_fatal_error(queue); /* fatal */
1112                 return -EPROTO;
1113         }
1114
1115         return nvmet_tcp_done_recv_pdu(queue);
1116 }
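/*
 * PDU reception is two-staged: the common 8-byte nvme_tcp_hdr is read
 * first to learn the PDU type and hlen, then the rest of the header
 * (plus the header digest, if negotiated) is pulled in and verified
 * before the PDU is dispatched to nvmet_tcp_done_recv_pdu().
 */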
1117
1118 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1119 {
1120         struct nvmet_tcp_queue *queue = cmd->queue;
1121
1122         nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
1123         queue->offset = 0;
1124         queue->left = NVME_TCP_DIGEST_LENGTH;
1125         queue->rcv_state = NVMET_TCP_RECV_DDGST;
1126 }
1127
1128 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1129 {
1130         struct nvmet_tcp_cmd  *cmd = queue->cmd;
1131         int ret;
1132
1133         while (msg_data_left(&cmd->recv_msg)) {
1134                 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1135                         cmd->recv_msg.msg_flags);
1136                 if (ret <= 0)
1137                         return ret;
1138
1139                 cmd->pdu_recv += ret;
1140                 cmd->rbytes_done += ret;
1141         }
1142
1143         nvmet_tcp_unmap_pdu_iovec(cmd);
1144         if (queue->data_digest) {
1145                 nvmet_tcp_prep_recv_ddgst(cmd);
1146                 return 0;
1147         }
1148
1149         if (cmd->rbytes_done == cmd->req.transfer_len)
1150                 nvmet_tcp_execute_request(cmd);
1151
1152         nvmet_prepare_receive_pdu(queue);
1153         return 0;
1154 }
1155
1156 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1157 {
1158         struct nvmet_tcp_cmd *cmd = queue->cmd;
1159         int ret;
1160         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1161         struct kvec iov = {
1162                 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1163                 .iov_len = queue->left
1164         };
1165
1166         ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1167                         iov.iov_len, msg.msg_flags);
1168         if (unlikely(ret < 0))
1169                 return ret;
1170
1171         queue->offset += ret;
1172         queue->left -= ret;
1173         if (queue->left)
1174                 return -EAGAIN;
1175
1176         if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1177                 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1178                         queue->idx, cmd->req.cmd->common.command_id,
1179                         queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1180                         le32_to_cpu(cmd->exp_ddgst));
1181                 nvmet_tcp_finish_cmd(cmd);
1182                 nvmet_tcp_fatal_error(queue);
1183                 ret = -EPROTO;
1184                 goto out;
1185         }
1186
1187         if (cmd->rbytes_done == cmd->req.transfer_len)
1188                 nvmet_tcp_execute_request(cmd);
1189
1190         ret = 0;
1191 out:
1192         nvmet_prepare_receive_pdu(queue);
1193         return ret;
1194 }
1195
1196 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1197 {
1198         int result = 0;
1199
1200         if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1201                 return 0;
1202
1203         if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1204                 result = nvmet_tcp_try_recv_pdu(queue);
1205                 if (result != 0)
1206                         goto done_recv;
1207         }
1208
1209         if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1210                 result = nvmet_tcp_try_recv_data(queue);
1211                 if (result != 0)
1212                         goto done_recv;
1213         }
1214
1215         if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1216                 result = nvmet_tcp_try_recv_ddgst(queue);
1217                 if (result != 0)
1218                         goto done_recv;
1219         }
1220
1221 done_recv:
1222         if (result < 0) {
1223                 if (result == -EAGAIN)
1224                         return 0;
1225                 return result;
1226         }
1227         return 1;
1228 }
1229
1230 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1231                 int budget, int *recvs)
1232 {
1233         int i, ret = 0;
1234
1235         for (i = 0; i < budget; i++) {
1236                 ret = nvmet_tcp_try_recv_one(queue);
1237                 if (unlikely(ret < 0)) {
1238                         nvmet_tcp_socket_error(queue, ret);
1239                         goto done;
1240                 } else if (ret == 0) {
1241                         break;
1242                 }
1243                 (*recvs)++;
1244         }
1245 done:
1246         return ret;
1247 }
1248
1249 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1250 {
1251         spin_lock(&queue->state_lock);
1252         if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1253                 queue->state = NVMET_TCP_Q_DISCONNECTING;
1254                 queue_work(nvmet_wq, &queue->release_work);
1255         }
1256         spin_unlock(&queue->state_lock);
1257 }
1258
1259 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1260 {
1261         queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1262 }
1263
1264 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1265                 int ops)
1266 {
1267         if (!idle_poll_period_usecs)
1268                 return false;
1269
1270         if (ops)
1271                 nvmet_tcp_arm_queue_deadline(queue);
1272
1273         return !time_after(jiffies, queue->poll_end);
1274 }
1275
1276 static void nvmet_tcp_io_work(struct work_struct *w)
1277 {
1278         struct nvmet_tcp_queue *queue =
1279                 container_of(w, struct nvmet_tcp_queue, io_work);
1280         bool pending;
1281         int ret, ops = 0;
1282
1283         do {
1284                 pending = false;
1285
1286                 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1287                 if (ret > 0)
1288                         pending = true;
1289                 else if (ret < 0)
1290                         return;
1291
1292                 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1293                 if (ret > 0)
1294                         pending = true;
1295                 else if (ret < 0)
1296                         return;
1297
1298         } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1299
1300         /*
1301          * Requeue the worker if the idle deadline period is in progress or any
1302          * ops activity was recorded during the do-while loop above.
1303          */
1304         if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1305                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1306 }
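/*
 * io_work alternates receive and send rounds, each bounded by its own
 * budget, and bails out after NVMET_TCP_IO_WORK_BUDGET operations so a
 * single queue cannot monopolize the workqueue; remaining work is picked
 * up by the self-requeue above or by the next socket callback.
 */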
1307
1308 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1309                 struct nvmet_tcp_cmd *c)
1310 {
1311         u8 hdgst = nvmet_tcp_hdgst_len(queue);
1312
1313         c->queue = queue;
1314         c->req.port = queue->port->nport;
1315
1316         c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1317                         sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1318         if (!c->cmd_pdu)
1319                 return -ENOMEM;
1320         c->req.cmd = &c->cmd_pdu->cmd;
1321
1322         c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1323                         sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1324         if (!c->rsp_pdu)
1325                 goto out_free_cmd;
1326         c->req.cqe = &c->rsp_pdu->cqe;
1327
1328         c->data_pdu = page_frag_alloc(&queue->pf_cache,
1329                         sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1330         if (!c->data_pdu)
1331                 goto out_free_rsp;
1332
1333         c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1334                         sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1335         if (!c->r2t_pdu)
1336                 goto out_free_data;
1337
1338         c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1339
1340         list_add_tail(&c->entry, &queue->free_list);
1341
1342         return 0;
1343 out_free_data:
1344         page_frag_free(c->data_pdu);
1345 out_free_rsp:
1346         page_frag_free(c->rsp_pdu);
1347 out_free_cmd:
1348         page_frag_free(c->cmd_pdu);
1349         return -ENOMEM;
1350 }
1351
1352 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1353 {
1354         page_frag_free(c->r2t_pdu);
1355         page_frag_free(c->data_pdu);
1356         page_frag_free(c->rsp_pdu);
1357         page_frag_free(c->cmd_pdu);
1358 }
1359
1360 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1361 {
1362         struct nvmet_tcp_cmd *cmds;
1363         int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1364
1365         cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1366         if (!cmds)
1367                 goto out;
1368
1369         for (i = 0; i < nr_cmds; i++) {
1370                 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1371                 if (ret)
1372                         goto out_free;
1373         }
1374
1375         queue->cmds = cmds;
1376
1377         return 0;
1378 out_free:
1379         while (--i >= 0)
1380                 nvmet_tcp_free_cmd(cmds + i);
1381         kfree(cmds);
1382 out:
1383         return ret;
1384 }
1385
1386 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1387 {
1388         struct nvmet_tcp_cmd *cmds = queue->cmds;
1389         int i;
1390
1391         for (i = 0; i < queue->nr_cmds; i++)
1392                 nvmet_tcp_free_cmd(cmds + i);
1393
1394         nvmet_tcp_free_cmd(&queue->connect);
1395         kfree(cmds);
1396 }
1397
1398 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1399 {
1400         struct socket *sock = queue->sock;
1401
1402         write_lock_bh(&sock->sk->sk_callback_lock);
1403         sock->sk->sk_data_ready =  queue->data_ready;
1404         sock->sk->sk_state_change = queue->state_change;
1405         sock->sk->sk_write_space = queue->write_space;
1406         sock->sk->sk_user_data = NULL;
1407         write_unlock_bh(&sock->sk->sk_callback_lock);
1408 }
1409
1410 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
1411 {
1412         nvmet_req_uninit(&cmd->req);
1413         nvmet_tcp_unmap_pdu_iovec(cmd);
1414         kfree(cmd->iov);
1415         sgl_free(cmd->req.sg);
1416 }
1417
1418 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1419 {
1420         struct nvmet_tcp_cmd *cmd = queue->cmds;
1421         int i;
1422
1423         for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1424                 if (nvmet_tcp_need_data_in(cmd))
1425                         nvmet_tcp_finish_cmd(cmd);
1426         }
1427
1428         if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1429                 /* failed in connect */
1430                 nvmet_tcp_finish_cmd(&queue->connect);
1431         }
1432 }
1433
1434 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1435 {
1436         struct page *page;
1437         struct nvmet_tcp_queue *queue =
1438                 container_of(w, struct nvmet_tcp_queue, release_work);
1439
1440         mutex_lock(&nvmet_tcp_queue_mutex);
1441         list_del_init(&queue->queue_list);
1442         mutex_unlock(&nvmet_tcp_queue_mutex);
1443
1444         nvmet_tcp_restore_socket_callbacks(queue);
1445         flush_work(&queue->io_work);
1446
1447         nvmet_tcp_uninit_data_in_cmds(queue);
1448         nvmet_sq_destroy(&queue->nvme_sq);
1449         cancel_work_sync(&queue->io_work);
1450         sock_release(queue->sock);
1451         nvmet_tcp_free_cmds(queue);
1452         if (queue->hdr_digest || queue->data_digest)
1453                 nvmet_tcp_free_crypto(queue);
1454         ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
1455
1456         page = virt_to_head_page(queue->pf_cache.va);
1457         __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1458         kfree(queue);
1459 }
1460
1461 static void nvmet_tcp_data_ready(struct sock *sk)
1462 {
1463         struct nvmet_tcp_queue *queue;
1464
1465         read_lock_bh(&sk->sk_callback_lock);
1466         queue = sk->sk_user_data;
1467         if (likely(queue))
1468                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1469         read_unlock_bh(&sk->sk_callback_lock);
1470 }
1471
1472 static void nvmet_tcp_write_space(struct sock *sk)
1473 {
1474         struct nvmet_tcp_queue *queue;
1475
1476         read_lock_bh(&sk->sk_callback_lock);
1477         queue = sk->sk_user_data;
1478         if (unlikely(!queue))
1479                 goto out;
1480
1481         if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1482                 queue->write_space(sk);
1483                 goto out;
1484         }
1485
1486         if (sk_stream_is_writeable(sk)) {
1487                 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1488                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1489         }
1490 out:
1491         read_unlock_bh(&sk->sk_callback_lock);
1492 }
1493
1494 static void nvmet_tcp_state_change(struct sock *sk)
1495 {
1496         struct nvmet_tcp_queue *queue;
1497
1498         read_lock_bh(&sk->sk_callback_lock);
1499         queue = sk->sk_user_data;
1500         if (!queue)
1501                 goto done;
1502
1503         switch (sk->sk_state) {
1504         case TCP_FIN_WAIT2:
1505         case TCP_LAST_ACK:
1506                 break;
1507         case TCP_FIN_WAIT1:
1508         case TCP_CLOSE_WAIT:
1509         case TCP_CLOSE:
1510                 /* FALLTHRU */
1511                 nvmet_tcp_schedule_release_queue(queue);
1512                 break;
1513         default:
1514                 pr_warn("queue %d unhandled state %d\n",
1515                         queue->idx, sk->sk_state);
1516         }
1517 done:
1518         read_unlock_bh(&sk->sk_callback_lock);
1519 }
1520
1521 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1522 {
1523         struct socket *sock = queue->sock;
1524         struct inet_sock *inet = inet_sk(sock->sk);
1525         int ret;
1526
1527         ret = kernel_getsockname(sock,
1528                 (struct sockaddr *)&queue->sockaddr);
1529         if (ret < 0)
1530                 return ret;
1531
1532         ret = kernel_getpeername(sock,
1533                 (struct sockaddr *)&queue->sockaddr_peer);
1534         if (ret < 0)
1535                 return ret;
1536
1537         /*
1538          * Clean up whatever is sitting in the TCP transmit queue on socket
1539          * close. This is done to prevent stale data from being sent should
1540          * the network connection be restored before TCP times out.
1541          */
1542         sock_no_linger(sock->sk);
1543
1544         if (so_priority > 0)
1545                 sock_set_priority(sock->sk, so_priority);
1546
1547         /* Set socket type of service */
1548         if (inet->rcv_tos > 0)
1549                 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1550
1551         ret = 0;
1552         write_lock_bh(&sock->sk->sk_callback_lock);
1553         if (sock->sk->sk_state != TCP_ESTABLISHED) {
1554                 /*
1555                  * If the socket is already closing, don't even start
1556                  * consuming it
1557                  */
1558                 ret = -ENOTCONN;
1559         } else {
1560                 sock->sk->sk_user_data = queue;
1561                 queue->data_ready = sock->sk->sk_data_ready;
1562                 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1563                 queue->state_change = sock->sk->sk_state_change;
1564                 sock->sk->sk_state_change = nvmet_tcp_state_change;
1565                 queue->write_space = sock->sk->sk_write_space;
1566                 sock->sk->sk_write_space = nvmet_tcp_write_space;
1567                 if (idle_poll_period_usecs)
1568                         nvmet_tcp_arm_queue_deadline(queue);
1569                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1570         }
1571         write_unlock_bh(&sock->sk->sk_callback_lock);
1572
1573         return ret;
1574 }
1575
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
                struct socket *newsock)
{
        struct nvmet_tcp_queue *queue;
        int ret;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return -ENOMEM;

        INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
        INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
        queue->sock = newsock;
        queue->port = port;
        queue->nr_cmds = 0;
        spin_lock_init(&queue->state_lock);
        queue->state = NVMET_TCP_Q_CONNECTING;
        INIT_LIST_HEAD(&queue->free_list);
        init_llist_head(&queue->resp_list);
        INIT_LIST_HEAD(&queue->resp_send_list);

        queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
        if (queue->idx < 0) {
                ret = queue->idx;
                goto out_free_queue;
        }

        ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
        if (ret)
                goto out_ida_remove;

        ret = nvmet_sq_init(&queue->nvme_sq);
        if (ret)
                goto out_free_connect;

        nvmet_prepare_receive_pdu(queue);

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);

        ret = nvmet_tcp_set_queue_sock(queue);
        if (ret)
                goto out_destroy_sq;

        return 0;
out_destroy_sq:
        mutex_lock(&nvmet_tcp_queue_mutex);
        list_del_init(&queue->queue_list);
        mutex_unlock(&nvmet_tcp_queue_mutex);
        nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
        nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
        ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
        kfree(queue);
        return ret;
}

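/*
 * Accept work for a listening port: drain the listen queue with
 * non-blocking kernel_accept() until it returns -EAGAIN, allocating an
 * nvmet queue for every new connection.  If queue allocation fails, the
 * freshly accepted socket is released and the connection drops.
 */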
static void nvmet_tcp_accept_work(struct work_struct *w)
{
        struct nvmet_tcp_port *port =
                container_of(w, struct nvmet_tcp_port, accept_work);
        struct socket *newsock;
        int ret;

        while (true) {
                ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
                if (ret < 0) {
                        if (ret != -EAGAIN)
                                pr_warn("failed to accept err=%d\n", ret);
                        return;
                }
                ret = nvmet_tcp_alloc_queue(port, newsock);
                if (ret) {
                        pr_err("failed to allocate queue\n");
                        sock_release(newsock);
                }
        }
}

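/*
 * data_ready callback of the listening socket: while the socket is still in
 * TCP_LISTEN, a pending connection is waiting, so schedule the port's accept
 * work on the nvmet workqueue.
 */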
static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
        struct nvmet_tcp_port *port;

        read_lock_bh(&sk->sk_callback_lock);
        port = sk->sk_user_data;
        if (!port)
                goto out;

        if (sk->sk_state == TCP_LISTEN)
                queue_work(nvmet_wq, &port->accept_work);
out:
        read_unlock_bh(&sk->sk_callback_lock);
}

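/*
 * Bring up the TCP listener backing an nvmet port: resolve the configured
 * traddr/trsvcid into a sockaddr, create a listening socket with
 * SO_REUSEADDR and TCP_NODELAY, install nvmet_tcp_listen_data_ready() as the
 * data_ready callback, and listen with a backlog of 128.  If the port has no
 * inline data size configured, the transport default is applied.  This path
 * is normally reached through the nvmet configfs interface, roughly as below
 * (paths and values are illustrative only):
 *
 *   cd /sys/kernel/config/nvmet/ports/1
 *   echo tcp      > addr_trtype
 *   echo ipv4     > addr_adrfam
 *   echo 10.0.0.1 > addr_traddr
 *   echo 4420     > addr_trsvcid
 *   ln -s ../../subsystems/<subsys-nqn> subsystems/
 */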
static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
        struct nvmet_tcp_port *port;
        __kernel_sa_family_t af;
        int ret;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        switch (nport->disc_addr.adrfam) {
        case NVMF_ADDR_FAMILY_IP4:
                af = AF_INET;
                break;
        case NVMF_ADDR_FAMILY_IP6:
                af = AF_INET6;
                break;
        default:
                pr_err("address family %d not supported\n",
                                nport->disc_addr.adrfam);
                ret = -EINVAL;
                goto err_port;
        }

        ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
                        nport->disc_addr.trsvcid, &port->addr);
        if (ret) {
                pr_err("malformed ip/port passed: %s:%s\n",
                        nport->disc_addr.traddr, nport->disc_addr.trsvcid);
                goto err_port;
        }

        port->nport = nport;
        INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
        if (port->nport->inline_data_size < 0)
                port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

        ret = sock_create(port->addr.ss_family, SOCK_STREAM,
                                IPPROTO_TCP, &port->sock);
        if (ret) {
                pr_err("failed to create a socket\n");
                goto err_port;
        }

        port->sock->sk->sk_user_data = port;
        port->data_ready = port->sock->sk->sk_data_ready;
        port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
        sock_set_reuseaddr(port->sock->sk);
        tcp_sock_set_nodelay(port->sock->sk);
        if (so_priority > 0)
                sock_set_priority(port->sock->sk, so_priority);

        ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
                        sizeof(port->addr));
        if (ret) {
                pr_err("failed to bind port socket %d\n", ret);
                goto err_sock;
        }

        ret = kernel_listen(port->sock, 128);
        if (ret) {
                pr_err("failed to listen %d on port sock\n", ret);
                goto err_sock;
        }

        nport->priv = port;
        pr_info("enabling port %d (%pISpc)\n",
                le16_to_cpu(nport->disc_addr.portid), &port->addr);

        return 0;

err_sock:
        sock_release(port->sock);
err_port:
        kfree(port);
        return ret;
}

static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
        struct nvmet_tcp_queue *queue;

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                if (queue->port == port)
                        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
}

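/*
 * Tear down the listener for a port: restore the original data_ready
 * callback and clear sk_user_data so no new accept work is scheduled, wait
 * for accept work already in flight, then shut down the queues on this port
 * that do not yet belong to any controller, and finally release the
 * listening socket.
 */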
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
        struct nvmet_tcp_port *port = nport->priv;

        write_lock_bh(&port->sock->sk->sk_callback_lock);
        port->sock->sk->sk_data_ready = port->data_ready;
        port->sock->sk->sk_user_data = NULL;
        write_unlock_bh(&port->sock->sk->sk_callback_lock);
        cancel_work_sync(&port->accept_work);
        /*
         * Destroy the remaining queues, which do not belong to any
         * controller yet.
         */
        nvmet_tcp_destroy_port_queues(port);

        sock_release(port->sock);
        kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
        struct nvmet_tcp_queue *queue;

        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                if (queue->nvme_sq.ctrl == ctrl)
                        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
}

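/*
 * Called by the nvmet core while handling the fabrics Connect command for a
 * queue: size the command array to twice the negotiated submission queue
 * depth and allocate it.  For the admin queue (qid 0), in-flight controller
 * teardown on the nvmet workqueue is allowed to complete first.
 */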
static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
        struct nvmet_tcp_queue *queue =
                container_of(sq, struct nvmet_tcp_queue, nvme_sq);

        if (sq->qid == 0) {
                /* Let inflight controller teardown complete */
                flush_workqueue(nvmet_wq);
        }

        queue->nr_cmds = sq->size * 2;
        if (nvmet_tcp_alloc_cmds(queue))
                return NVME_SC_INTERNAL;
        return 0;
}

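/*
 * Fill in the transport address reported in discovery log entries.  When the
 * port listens on a wildcard address, report the local address of the
 * connection the request arrived on, so the host gets an address it can
 * actually connect to; otherwise report the configured traddr.
 */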
static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
                struct nvmet_port *nport, char *traddr)
{
        struct nvmet_tcp_port *port = nport->priv;

        if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
                struct nvmet_tcp_cmd *cmd =
                        container_of(req, struct nvmet_tcp_cmd, req);
                struct nvmet_tcp_queue *queue = cmd->queue;

                sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
        } else {
                memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
        }
}

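/*
 * Fabrics transport ops registered with the nvmet core.  The msdbd value of
 * 1 is reported to hosts as the maximum number of SGL data block descriptors
 * supported per command.
 */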
static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
        .owner                  = THIS_MODULE,
        .type                   = NVMF_TRTYPE_TCP,
        .msdbd                  = 1,
        .add_port               = nvmet_tcp_add_port,
        .remove_port            = nvmet_tcp_remove_port,
        .queue_response         = nvmet_tcp_queue_response,
        .delete_ctrl            = nvmet_tcp_delete_ctrl,
        .install_queue          = nvmet_tcp_install_queue,
        .disc_traddr            = nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
        int ret;

        nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
                                WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!nvmet_tcp_wq)
                return -ENOMEM;

        ret = nvmet_register_transport(&nvmet_tcp_ops);
        if (ret)
                goto err;

        return 0;
err:
        destroy_workqueue(nvmet_tcp_wq);
        return ret;
}

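/*
 * Module unload: unregister the transport so no new ports or queues can be
 * created, flush the nvmet workqueue to let pending accept/release work
 * finish, shut down any sockets still on the queue list (which schedules
 * their release), flush again to wait for those releases, and only then
 * destroy the I/O workqueue.
 */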
static void __exit nvmet_tcp_exit(void)
{
        struct nvmet_tcp_queue *queue;

        nvmet_unregister_transport(&nvmet_tcp_ops);

        flush_workqueue(nvmet_wq);
        mutex_lock(&nvmet_tcp_queue_mutex);
        list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        mutex_unlock(&nvmet_tcp_queue_mutex);
        flush_workqueue(nvmet_wq);

        destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */