/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <dm/device-internal.h>
#include "nvme.h"
struct nvme_info *nvme_info;
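/*
 * Commands are issued one at a time and polled for completion, so a queue
 * depth of two entries is sufficient for both the admin and I/O queues.
 */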
#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512
/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
    struct nvme_dev *dev;
    struct nvme_command *sq_cmds;
    struct nvme_completion *cqes;
    wait_queue_head_t sq_full;
    unsigned long cmdid_data[];
};
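/*
 * Wait for the controller's ready status (CSTS.RDY) to match the requested
 * state, giving up after the timeout advertised in the CAP register.
 */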
static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
    u32 bit = enabled ? NVME_CSTS_RDY : 0;

    /* Timeout field in the CAP register is in 500 millisecond units */
    timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

    start = get_timer(0);
    while (get_timer(start) < timeout) {
        if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
            return 0;
    }
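/*
 * Build the PRP entries for a data transfer. PRP1 (the first data page) is
 * filled in by the caller; this routine computes *prp2: for transfers that
 * still fit in one more page it points at the data directly, otherwise a PRP
 * list is assembled in dev->prp_pool and its address is returned.
 */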
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
                           int total_len, u64 dma_addr)
    u32 page_size = dev->page_size;
    int offset = dma_addr & (page_size - 1);
    int length = total_len;

    length -= (page_size - offset);
    if (length)
        dma_addr += (page_size - offset);

    if (length <= page_size) {
        *prp2 = dma_addr;
        return 0;
    }

    nprps = DIV_ROUND_UP(length, page_size);

    if (nprps > dev->prp_entry_num) {
        dev->prp_pool = malloc(nprps << 3);
        if (!dev->prp_pool) {
            printf("Error: malloc prp_pool fail\n");
            return -ENOMEM;
        }
        dev->prp_entry_num = nprps;
    }

    prp_pool = dev->prp_pool;
    while (nprps) {
        if (i == ((page_size >> 3) - 1)) {
            *(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
                    page_size);
            i = 0;
            prp_pool += page_size;
        }
        *(prp_pool + i++) = cpu_to_le64(dma_addr);
        dma_addr += page_size;
        nprps--;
    }
    *prp2 = (ulong)dev->prp_pool;
static __le16 nvme_get_cmd_id(void)
    static unsigned short cmdid;

    return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
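/*
 * Completion entries are written by the controller via DMA, so invalidate
 * the cached copy before reading the status field.
 */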
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
    u64 start = (ulong)&nvmeq->cqes[index];
    u64 stop = start + sizeof(struct nvme_completion);

    invalidate_dcache_range(start, stop);

    return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq: The queue to use
 * @cmd: The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
    u16 tail = nvmeq->sq_tail;

    memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
    flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
                       (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

    if (++tail == nvmeq->q_depth)
        tail = 0;
    writel(tail, nvmeq->q_db);
    nvmeq->sq_tail = tail;
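/*
 * Submit a command and busy-wait for its completion: poll the expected
 * completion-queue entry until its phase bit flips, check the status,
 * then advance the CQ head and ring the completion doorbell.
 */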
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
                                struct nvme_command *cmd,
                                u32 *result, unsigned timeout)
    u16 head = nvmeq->cq_head;
    u16 phase = nvmeq->cq_phase;
    ulong timeout_us = timeout * 100000;

    cmd->common.command_id = nvme_get_cmd_id();
    nvme_submit_cmd(nvmeq, cmd);

    start_time = timer_get_us();

    for (;;) {
        status = nvme_read_completion_status(nvmeq, head);
        if ((status & 0x01) == phase)
            break;
        if (timeout_us > 0 && (timer_get_us() - start_time)
            >= timeout_us)
            return -ETIMEDOUT;
    }

    status >>= 1;
    if (status) {
        printf("ERROR: status = %x, phase = %d, head = %d\n",
               status, phase, head);
        status = 0;
        if (++head == nvmeq->q_depth) {
            head = 0;
            phase = !phase;
        }
        writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;
        return -EIO;
    }

    if (result)
        *result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

    if (++head == nvmeq->q_depth) {
        head = 0;
        phase = !phase;
    }
    writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
    nvmeq->cq_head = head;
    nvmeq->cq_phase = phase;
static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
                                 u32 *result)
    return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
                                           int qid, int depth)
    struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));

    memset(nvmeq, 0, sizeof(*nvmeq));

    nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
    memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

    nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
    memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

    nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
    nvmeq->q_depth = depth;
    dev->queues[qid] = nvmeq;

    free((void *)nvmeq->cqes);
static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
    struct nvme_command c;

    memset(&c, 0, sizeof(c));
    c.delete_queue.opcode = opcode;
    c.delete_queue.qid = cpu_to_le16(id);

    return nvme_submit_admin_cmd(dev, &c, NULL);

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
    return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
    return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
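/*
 * The controller is enabled and disabled through the CC.EN bit;
 * nvme_wait_ready() then waits for CSTS.RDY to reflect the new state.
 */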
static int nvme_enable_ctrl(struct nvme_dev *dev)
    dev->ctrl_config &= ~NVME_CC_SHN_MASK;
    dev->ctrl_config |= NVME_CC_ENABLE;
    writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

    return nvme_wait_ready(dev, true);

static int nvme_disable_ctrl(struct nvme_dev *dev)
    dev->ctrl_config &= ~NVME_CC_SHN_MASK;
    dev->ctrl_config &= ~NVME_CC_ENABLE;
    writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

    return nvme_wait_ready(dev, false);
static void nvme_free_queue(struct nvme_queue *nvmeq)
    free((void *)nvmeq->cqes);
    free(nvmeq->sq_cmds);

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
    for (i = dev->queue_count - 1; i >= lowest; i--) {
        struct nvme_queue *nvmeq = dev->queues[i];

        dev->queues[i] = NULL;
        nvme_free_queue(nvmeq);
    }
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
    struct nvme_dev *dev = nvmeq->dev;

    nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
    memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
    flush_dcache_range((ulong)nvmeq->cqes,
                       (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
    dev->online_queues++;
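/*
 * Bring up the admin queue: pick a page size both host and device support,
 * disable the controller, program AQA/ASQ/ACQ and the controller
 * configuration, then re-enable it and wait for ready.
 */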
static int nvme_configure_admin_queue(struct nvme_dev *dev)
    struct nvme_queue *nvmeq;
    /* most architectures use 4KB as the page size */
    unsigned page_shift = 12;
    unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
    unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

    if (page_shift < dev_page_min) {
        debug("Device minimum page size (%u) too large for host (%u)\n",
              1 << dev_page_min, 1 << page_shift);
        return -ENODEV;
    }

    if (page_shift > dev_page_max) {
        debug("Device maximum page size (%u) smaller than host (%u)\n",
              1 << dev_page_max, 1 << page_shift);
        page_shift = dev_page_max;
    }

    result = nvme_disable_ctrl(dev);

    nvmeq = dev->queues[0];
    if (!nvmeq) {
        nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
    }

    aqa = nvmeq->q_depth - 1;

    dev->page_size = 1 << page_shift;

    dev->ctrl_config = NVME_CC_CSS_NVM;
    dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
    dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
    dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

    writel(aqa, &dev->bar->aqa);
    nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
    nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

    result = nvme_enable_ctrl(dev);

    nvmeq->cq_vector = 0;

    nvme_init_queue(dev->queues[0], 0);

    nvme_free_queues(dev, 0);
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
                         struct nvme_queue *nvmeq)
    struct nvme_command c;
    int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

    memset(&c, 0, sizeof(c));
    c.create_cq.opcode = nvme_admin_create_cq;
    c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
    c.create_cq.cqid = cpu_to_le16(qid);
    c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
    c.create_cq.cq_flags = cpu_to_le16(flags);
    c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

    return nvme_submit_admin_cmd(dev, &c, NULL);

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
                         struct nvme_queue *nvmeq)
    struct nvme_command c;
    int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

    memset(&c, 0, sizeof(c));
    c.create_sq.opcode = nvme_admin_create_sq;
    c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
    c.create_sq.sqid = cpu_to_le16(qid);
    c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
    c.create_sq.sq_flags = cpu_to_le16(flags);
    c.create_sq.cqid = cpu_to_le16(qid);

    return nvme_submit_admin_cmd(dev, &c, NULL);
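/*
 * Issue an Identify admin command. The CNS field selects between controller
 * and namespace data structures; PRP2 is only filled in when the 4 KiB
 * identify buffer crosses a memory page boundary.
 */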
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
                  unsigned cns, dma_addr_t dma_addr)
    struct nvme_command c;
    u32 page_size = dev->page_size;
    int offset = dma_addr & (page_size - 1);
    int length = sizeof(struct nvme_id_ctrl);

    memset(&c, 0, sizeof(c));
    c.identify.opcode = nvme_admin_identify;
    c.identify.nsid = cpu_to_le32(nsid);
    c.identify.prp1 = cpu_to_le64(dma_addr);

    length -= (page_size - offset);
        dma_addr += (page_size - offset);
        c.identify.prp2 = cpu_to_le64(dma_addr);

    c.identify.cns = cpu_to_le32(cns);

    return nvme_submit_admin_cmd(dev, &c, NULL);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
                      dma_addr_t dma_addr, u32 *result)
    struct nvme_command c;

    memset(&c, 0, sizeof(c));
    c.features.opcode = nvme_admin_get_features;
    c.features.nsid = cpu_to_le32(nsid);
    c.features.prp1 = cpu_to_le64(dma_addr);
    c.features.fid = cpu_to_le32(fid);

    return nvme_submit_admin_cmd(dev, &c, result);

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
                      dma_addr_t dma_addr, u32 *result)
    struct nvme_command c;

    memset(&c, 0, sizeof(c));
    c.features.opcode = nvme_admin_set_features;
    c.features.prp1 = cpu_to_le64(dma_addr);
    c.features.fid = cpu_to_le32(fid);
    c.features.dword11 = cpu_to_le32(dword11);

    return nvme_submit_admin_cmd(dev, &c, result);
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
    struct nvme_dev *dev = nvmeq->dev;

    nvmeq->cq_vector = qid - 1;
    result = nvme_alloc_cq(dev, qid, nvmeq);

    result = nvme_alloc_sq(dev, qid, nvmeq);

    nvme_init_queue(nvmeq, qid);

    nvme_delete_sq(dev, qid);

    nvme_delete_cq(dev, qid);
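/*
 * Request 'count' I/O submission and completion queues via the Number of
 * Queues feature (both values are zero-based in dword11) and return the
 * smaller of the two counts the controller actually granted.
 */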
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
    u32 q_count = (count - 1) | ((count - 1) << 16);

    status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
                               q_count, 0, &result);

    return min(result & 0xffff, result >> 16) + 1;

static void nvme_create_io_queues(struct nvme_dev *dev)
    for (i = dev->queue_count; i <= dev->max_qid; i++)
        if (!nvme_alloc_queue(dev, i, dev->q_depth))
            break;

    for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
        if (nvme_create_queue(dev->queues[i], i))
            break;

static int nvme_setup_io_queues(struct nvme_dev *dev)
    result = nvme_set_queue_count(dev, nr_io_queues);

    if (result < nr_io_queues)
        nr_io_queues = result;

    dev->max_qid = nr_io_queues;

    /* Free previously allocated queues */
    nvme_free_queues(dev, nr_io_queues + 1);
    nvme_create_io_queues(dev);
static int nvme_get_info_from_identify(struct nvme_dev *dev)
    struct nvme_id_ctrl buf, *ctrl = &buf;
    int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

    ret = nvme_identify(dev, 0, 1, (dma_addr_t)ctrl);

    dev->nn = le32_to_cpu(ctrl->nn);
    dev->vwc = ctrl->vwc;
    memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
    memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
    memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

    if (ctrl->mdts) {
        dev->max_transfer_shift = (ctrl->mdts + shift);
    } else {
        /*
         * Maximum Data Transfer Size (MDTS) field indicates the maximum
         * data transfer size between the host and the controller. The
         * host should not submit a command that exceeds this transfer
         * size. The value is in units of the minimum memory page size
         * and is reported as a power of two (2^n).
         *
         * The spec also says: a value of 0h indicates no restrictions
         * on transfer size. But in nvme_blk_read/write() below we have
         * the following algorithm for maximum number of logical blocks
         * per transfer:
         *
         * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
         *
         * In order for lbas not to overflow, the maximum number is 15
         * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
         * Let's use 20 which provides 1MB size.
         */
        dev->max_transfer_shift = 20;
    }
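/*
 * Probe every controller registered in the NVMe uclass so that their
 * namespaces become available as block devices.
 */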
int nvme_scan_namespace(void)
    ret = uclass_get(UCLASS_NVME, &uc);

    uclass_foreach_dev(dev, uc) {
        ret = device_probe(dev);
static int nvme_blk_probe(struct udevice *udev)
    struct nvme_dev *ndev = dev_get_priv(udev->parent);
    struct blk_desc *desc = dev_get_uclass_platdata(udev);
    struct nvme_ns *ns = dev_get_priv(udev);
    struct nvme_id_ns buf, *id = &buf;
    struct pci_child_platdata *pplat;

    memset(ns, 0, sizeof(*ns));
    ns->ns_id = desc->devnum - ndev->blk_dev_start + 1;
    if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)id))
        return -EIO;

    flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
    ns->lba_shift = id->lbaf[flbas].ds;
    ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
    ns->mode_select_block_len = 1 << ns->lba_shift;
    list_add(&ns->list, &ndev->namespaces);

    desc->lba = ns->mode_select_num_blocks;
    desc->log2blksz = ns->lba_shift;
    desc->blksz = 1 << ns->lba_shift;

    pplat = dev_get_parent_platdata(udev->parent);
    sprintf(desc->vendor, "0x%.4x", pplat->vendor);
    memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
    memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
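/*
 * A single command may transfer at most (1 << dev->max_transfer_shift)
 * bytes, so larger requests are split into chunks of at most 'lbas' logical
 * blocks, each with its own PRP setup, and submitted synchronously on I/O
 * queue 1.
 */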
static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
                           lbaint_t blkcnt, void *buffer)
    struct nvme_ns *ns = dev_get_priv(udev);
    struct nvme_dev *dev = ns->dev;
    struct nvme_command c;
    struct blk_desc *desc = dev_get_uclass_platdata(udev);
    u64 total_len = blkcnt << desc->log2blksz;
    u64 temp_len = total_len;

    u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
    u64 total_lbas = blkcnt;

    c.rw.opcode = nvme_cmd_read;
    c.rw.nsid = cpu_to_le32(ns->ns_id);

    while (total_lbas) {
        if (total_lbas < lbas) {
            lbas = (u16)total_lbas;
            total_lbas = 0;
        } else {
            total_lbas -= lbas;
        }

        if (nvme_setup_prps
            (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer))
            return -EIO;
        c.rw.slba = cpu_to_le64(slba);
        slba += lbas;
        c.rw.length = cpu_to_le16(lbas - 1);
        c.rw.prp1 = cpu_to_le64((ulong)buffer);
        c.rw.prp2 = cpu_to_le64(prp2);
        status = nvme_submit_sync_cmd(dev->queues[1],
                                      &c, NULL, IO_TIMEOUT);
        if (status)
            break;

        temp_len -= lbas << ns->lba_shift;
        buffer += lbas << ns->lba_shift;
    }

    return (total_len - temp_len) >> desc->log2blksz;
static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
                            lbaint_t blkcnt, const void *buffer)
    struct nvme_ns *ns = dev_get_priv(udev);
    struct nvme_dev *dev = ns->dev;
    struct nvme_command c;
    struct blk_desc *desc = dev_get_uclass_platdata(udev);
    u64 total_len = blkcnt << desc->log2blksz;
    u64 temp_len = total_len;

    u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
    u64 total_lbas = blkcnt;

    c.rw.opcode = nvme_cmd_write;
    c.rw.nsid = cpu_to_le32(ns->ns_id);

    while (total_lbas) {
        if (total_lbas < lbas) {
            lbas = (u16)total_lbas;
            total_lbas = 0;
        } else {
            total_lbas -= lbas;
        }

        if (nvme_setup_prps
            (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer))
            return -EIO;
        c.rw.slba = cpu_to_le64(slba);
        slba += lbas;
        c.rw.length = cpu_to_le16(lbas - 1);
        c.rw.prp1 = cpu_to_le64((ulong)buffer);
        c.rw.prp2 = cpu_to_le64(prp2);
        status = nvme_submit_sync_cmd(dev->queues[1],
                                      &c, NULL, IO_TIMEOUT);
        if (status)
            break;

        temp_len -= lbas << ns->lba_shift;
        buffer += lbas << ns->lba_shift;
    }

    return (total_len - temp_len) >> desc->log2blksz;
static const struct blk_ops nvme_blk_ops = {
    .read	= nvme_blk_read,
    .write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
    .name	= "nvme-blk",
    .id	= UCLASS_BLK,
    .probe	= nvme_blk_probe,
    .ops	= &nvme_blk_ops,
    .priv_auto_alloc_size = sizeof(struct nvme_ns),
};
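/* Each controller is named "nvme#<n>" using a global, monotonic counter. */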
static int nvme_bind(struct udevice *udev)
    char name[20];

    sprintf(name, "nvme#%d", nvme_info->ndev_num++);

    return device_set_name(udev, name);
static int nvme_probe(struct udevice *udev)
    struct nvme_dev *ndev = dev_get_priv(udev);

    ndev->instance = trailing_strtol(udev->name);

    INIT_LIST_HEAD(&ndev->namespaces);
    ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
                               PCI_REGION_MEM);
    if (readl(&ndev->bar->csts) == -1) {
        printf("Error: %s: Out of memory!\n", udev->name);

    ndev->queues = malloc(2 * sizeof(struct nvme_queue *));
    if (!ndev->queues) {
        printf("Error: %s: Out of memory!\n", udev->name);
    memset(ndev->queues, 0, 2 * sizeof(struct nvme_queue *));
    ndev->prp_pool = malloc(MAX_PRP_POOL);
    if (!ndev->prp_pool) {
        printf("Error: %s: Out of memory!\n", udev->name);

    ndev->prp_entry_num = MAX_PRP_POOL >> 3;
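    /*
     * Queue depth and doorbell stride come from the CAP register; the
     * doorbell registers themselves start at offset 4096 of BAR0, right
     * after the controller registers.
     */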
    ndev->cap = nvme_readq(&ndev->bar->cap);
    ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
    ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
    ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

    ret = nvme_configure_admin_queue(ndev);

    ret = nvme_setup_io_queues(ndev);

    nvme_get_info_from_identify(ndev);
    ndev->blk_dev_start = nvme_info->ns_num;
    list_add(&ndev->node, &nvme_info->dev_list);

    free((void *)ndev->queues);
U_BOOT_DRIVER(nvme) = {
    .priv_auto_alloc_size = sizeof(struct nvme_dev),
};
struct pci_device_id nvme_supported[] = {
    { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
    {}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);