// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * obtained, while readers (populating the discovery log page or checking
 * host-subsystem links) take the read lock to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	u16 status;

	switch (errno) {
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		break;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		break;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		status = NVME_SC_ACCESS_DENIED;
		break;
	case -EIO:
		/* FALLTHRU */
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
	}

	return status;
}
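/*
 * Illustrative sketch (not part of this file): a backend that gets a
 * negative errno back from the block or file layer would typically
 * translate it before completing the request, e.g.:
 *
 *	nvmet_req_complete(req, errno_to_nvme_status(req, ret));
 *
 * The helper also records a best-effort error location in req->error_loc
 * so it can be reported in the error log page.
 */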

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *ns;

	if (list_empty(&subsys->namespaces))
		return 0;

	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
	return ns->nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
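/*
 * The AEN completion result dword packs the three fields of the
 * Asynchronous Event Request completion. Worked example, assuming the
 * usual header values NVME_AER_TYPE_NOTICE == 0x2,
 * NVME_AER_NOTICE_NS_CHANGED == 0x0 and NVME_LOG_CHANGED_NS == 0x4:
 * a namespace-attribute notice completes with
 * 0x2 | (0x0 << 8) | (0x4 << 16) == 0x00040002.
 */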

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);
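/*
 * Illustrative sketch (not from this file): a transport module for a
 * hypothetical "foo" fabric registers its ops once at module load and
 * unregisters them on exit; NVMF_TRTYPE_FOO and the nvmet_foo_* helpers
 * below are made up for the example.
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_FOO,	// hypothetical
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	static int __init nvmet_foo_init(void)
 *	{
 *		return nvmet_register_transport(&nvmet_foo_ops);
 *	}
 *
 * The matching nvmet_unregister_transport() call belongs in module exit.
 */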

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool cmd_seen = ctrl->cmd_seen;

	ctrl->cmd_seen = false;
	if (cmd_seen) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}
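/*
 * Note that this implements a traffic-based keep-alive: if any command was
 * seen since the last expiry (ctrl->cmd_seen), the timer is simply re-armed
 * for another KATO period instead of declaring the host dead.
 */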

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;
	ret = 0;
	if (ns->enabled)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}
	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}
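/*
 * Rough usage sketch (assuming the standard nvmet configfs layout): a
 * namespace is created and enabled from user space along these lines:
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/<nqn>/namespaces/1
 *	echo -n /dev/nvme0n1 > .../namespaces/1/device_path
 *	echo 1 > .../namespaces/1/enable
 *
 * Writing "1" to the enable attribute is what ultimately invokes
 * nvmet_ns_enable() via the nvmet configfs code.
 */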

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock(). Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;

	return ns;
}

static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
				old_sqhd);
	}
	req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}
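/*
 * The cmpxchg() loop above advances the SQ head without taking a lock, so
 * completions racing from multiple contexts still produce a consistent,
 * monotonically wrapping value; the sq->size check skips the modulo when
 * the queue size is not (yet) known.
 */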

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->rsp->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->rsp->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);
	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->transfer_len = 0;
	req->rsp->status = 0;
	req->rsp->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->cmd_seen = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

void nvmet_req_execute(struct nvmet_req *req)
{
	if (unlikely(req->data_len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
	} else
		req->execute(req);
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);

int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = NULL;

	if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
		if (req->sq->ctrl && req->ns)
			p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
						    req->ns->nsid);

		req->p2p_dev = NULL;
		if (req->sq->qid && p2p_dev) {
			req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
						       req->transfer_len);
			if (req->sg) {
				req->p2p_dev = p2p_dev;
				return 0;
			}
		}

		/*
		 * If no P2P memory was available we fall back to using
		 * regular memory.
		 */
	}

	req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
	if (!req->sg)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);

void nvmet_req_free_sgl(struct nvmet_req *req)
{
	if (req->p2p_dev)
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
	else
		sgl_free(req->sg);

	req->sg = NULL;
	req->sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
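/*
 * Illustrative request lifecycle in a transport driver (a sketch under
 * simplifying assumptions, not a verbatim copy of any in-tree transport;
 * "len_from_the_wire" is a placeholder for the transfer length the
 * transport parsed from its capsule):
 *
 *	if (!nvmet_req_init(req, cq, sq, ops))
 *		return;		// core already completed req with an error
 *	req->transfer_len = len_from_the_wire;
 *	if (nvmet_req_alloc_sgl(req))
 *		goto fail;	// then map req->sg and receive/DMA the data
 *	nvmet_req_execute(req);	// backend calls nvmet_req_complete() later
 *	...
 *	nvmet_req_free_sgl(req);	// once the response has been sent
 */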

static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
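/*
 * Worked example (field positions per the NVMe spec, via the
 * NVME_CC_*_SHIFT definitions): a host writing CC = 0x00460001 requests
 * EN = 1, IOSQES = 6 (64-byte SQ entries) and IOCQES = 4 (16-byte CQ
 * entries) while leaving CSS, MPS, AMS and SHN at 0, which is exactly
 * the combination nvmet_start_ctrl() below accepts.
 */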

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller. Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
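/*
 * For reference (per the NVMe CAP register layout): bit 37 is the CSS bit
 * advertising the NVM command set, bits 31:24 (TO) give the worst-case
 * CC.EN transition time in 500 ms units, and bits 15:0 (MQES) report the
 * maximum queue size as a 0's-based value, hence the "- 1" above.
 */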

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to clean up stale discovery sessions
	 */
	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq) {
		error = -ENOMEM;
		goto out;
	}

	error = nvmet_init_discovery();
	if (error)
		goto out_free_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");