X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=drivers%2Fnvme%2Fnvme.c;h=eb6fdeda5015b33ce7d52b5b62201d2d3d0dd729;hb=c0eaffa03959a97e6c139ea023e4041170e105e6;hp=88679773a7a7d5e7ca25534f83d9a9386c1a5161;hpb=04d2a3840110e495a119e6226b5c38936b48988a;p=u-boot

diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
index 88679773a7..eb6fdeda50 100644
--- a/drivers/nvme/nvme.c
+++ b/drivers/nvme/nvme.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright (C) 2017 NXP Semiconductors
  * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
- *
- * SPDX-License-Identifier:	GPL-2.0+
  */
 
 #include <common.h>
@@ -13,8 +12,6 @@
 #include <dm/device-internal.h>
 #include "nvme.h"
 
-struct nvme_info *nvme_info;
-
 #define NVME_Q_DEPTH		2
 #define NVME_AQ_DEPTH		2
 #define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
@@ -23,6 +20,12 @@ struct nvme_info *nvme_info;
 #define IO_TIMEOUT		30
 #define MAX_PRP_POOL		512
 
+enum nvme_queue_id {
+	NVME_ADMIN_Q,
+	NVME_IO_Q,
+	NVME_Q_NUM,
+};
+
 /*
  * An NVM Express queue. Each device has at least two (one for admin
  * commands and one for I/O commands).
@@ -209,7 +212,8 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
 static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
 				 u32 *result)
 {
-	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
+				    result, ADMIN_TIMEOUT);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
@@ -349,7 +353,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	nvmeq = dev->queues[0];
+	nvmeq = dev->queues[NVME_ADMIN_Q];
 	if (!nvmeq) {
 		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
 		if (!nvmeq)
@@ -377,7 +381,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
 	nvmeq->cq_vector = 0;
 
-	nvme_init_queue(dev->queues[0], 0);
+	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);
 
 	return result;
 
@@ -428,6 +432,7 @@ int nvme_identify(struct nvme_dev *dev, unsigned nsid,
 	u32 page_size = dev->page_size;
 	int offset = dma_addr & (page_size - 1);
 	int length = sizeof(struct nvme_id_ctrl);
+	int ret;
 
 	memset(&c, 0, sizeof(c));
 	c.identify.opcode = nvme_admin_identify;
@@ -444,7 +449,12 @@ int nvme_identify(struct nvme_dev *dev, unsigned nsid,
 
 	c.identify.cns = cpu_to_le32(cns);
 
-	return nvme_submit_admin_cmd(dev, &c, NULL);
+	ret = nvme_submit_admin_cmd(dev, &c, NULL);
+	if (!ret)
+		invalidate_dcache_range(dma_addr,
+					dma_addr + sizeof(struct nvme_id_ctrl));
+
+	return ret;
 }
 
 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
@@ -458,6 +468,11 @@ int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
+	/*
+	 * TODO: add cache invalidate operation when the size of
+	 * the DMA buffer is known
+	 */
+
 	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
@@ -472,6 +487,11 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
+	/*
+	 * TODO: add cache flush operation when the size of
+	 * the DMA buffer is known
+	 */
+
 	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
@@ -541,9 +561,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result <= 0)
 		return result;
 
-	if (result < nr_io_queues)
-		nr_io_queues = result;
-
 	dev->max_qid = nr_io_queues;
 
 	/* Free previously allocated queues */
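
Aside (not part of the patch): the identify/get_features/set_features hunks above all apply one DMA cache-maintenance rule: flush a buffer before the device reads it, invalidate it after the device has written it, and only ever on whole cache lines. A minimal sketch of that rule, assuming U-Boot's flush_dcache_range()/invalidate_dcache_range(), ALIGN() and ARCH_DMA_MINALIGN; the helper name is invented for illustration:

#include <common.h>	/* flush_dcache_range(), invalidate_dcache_range() */
#include <asm/cache.h>	/* ARCH_DMA_MINALIGN */

/* Invented example helper, not driver code. */
static void nvme_dma_sync_example(void *buf, size_t len, bool device_writes)
{
	/*
	 * Operate on whole cache lines only: partial-line maintenance
	 * would also flush/invalidate unrelated neighbouring data.
	 */
	ulong start = (ulong)buf & ~((ulong)ARCH_DMA_MINALIGN - 1);
	ulong end = ALIGN((ulong)buf + len, ARCH_DMA_MINALIGN);

	if (device_writes)
		/*
		 * device -> CPU: discard stale lines after completion so
		 * the CPU re-reads what the device put in memory, as
		 * nvme_identify() now does.
		 */
		invalidate_dcache_range(start, end);
	else
		/*
		 * CPU -> device: commit dirty lines to memory before the
		 * controller fetches them, as the write path does below.
		 */
		flush_dcache_range(start, end);
}

The nvme_blk_rw() helper introduced further down follows exactly this split: flush before write commands, invalidate after read commands.
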
@@ -555,7 +572,8 @@
 
 static int nvme_get_info_from_identify(struct nvme_dev *dev)
 {
-	struct nvme_id_ctrl buf, *ctrl = &buf;
+	ALLOC_CACHE_ALIGN_BUFFER(char, buf, sizeof(struct nvme_id_ctrl));
+	struct nvme_id_ctrl *ctrl = (struct nvme_id_ctrl *)buf;
 	int ret;
 	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;
 
@@ -620,12 +638,14 @@ static int nvme_blk_probe(struct udevice *udev)
 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
 	struct nvme_ns *ns = dev_get_priv(udev);
 	u8 flbas;
-	struct nvme_id_ns buf, *id = &buf;
+	ALLOC_CACHE_ALIGN_BUFFER(char, buf, sizeof(struct nvme_id_ns));
+	struct nvme_id_ns *id = (struct nvme_id_ns *)buf;
 	struct pci_child_platdata *pplat;
 
 	memset(ns, 0, sizeof(*ns));
 	ns->dev = ndev;
-	ns->ns_id = desc->devnum - ndev->blk_dev_start + 1;
+	/* extract the namespace id from the block device name */
+	ns->ns_id = trailing_strtol(udev->name) + 1;
 	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)id))
 		return -EIO;
 
@@ -649,8 +669,8 @@ static int nvme_blk_probe(struct udevice *udev)
 	return 0;
 }
 
-static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
-			   lbaint_t blkcnt, void *buffer)
+static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
+			 lbaint_t blkcnt, void *buffer, bool read)
 {
 	struct nvme_ns *ns = dev_get_priv(udev);
 	struct nvme_dev *dev = ns->dev;
@@ -665,7 +685,11 @@ static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
 	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
 	u64 total_lbas = blkcnt;
 
-	c.rw.opcode = nvme_cmd_read;
+	if (!read)
+		flush_dcache_range((unsigned long)buffer,
+				   (unsigned long)buffer + total_len);
+
+	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
 	c.rw.flags = 0;
 	c.rw.nsid = cpu_to_le32(ns->ns_id);
 	c.rw.control = 0;
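
Another aside, not from the patch itself: the two identify paths above move from bare stack structs to ALLOC_CACHE_ALIGN_BUFFER() because the invalidate in nvme_identify() is only safe on a buffer that is cache-line aligned and padded; a plain struct on the stack can share cache lines with live neighbouring data, which the invalidate would discard. A sketch of the idiom, assuming U-Boot's <memalign.h> macro and the nvme_id_ctrl layout from "nvme.h"; the wrapper function is hypothetical:

#include <common.h>
#include <errno.h>
#include <memalign.h>
#include "nvme.h"

/* Hypothetical caller, mirroring nvme_get_info_from_identify() above. */
static int nvme_print_model_example(struct nvme_dev *dev)
{
	/*
	 * Stack buffer rounded up and aligned to ARCH_DMA_MINALIGN, so
	 * invalidate_dcache_range() cannot clobber unrelated data.
	 */
	ALLOC_CACHE_ALIGN_BUFFER(char, buf, sizeof(struct nvme_id_ctrl));
	struct nvme_id_ctrl *ctrl = (struct nvme_id_ctrl *)buf;

	/* nsid = 0, cns = 1: identify the controller itself. */
	if (nvme_identify(dev, 0, 1, (dma_addr_t)ctrl))
		return -EIO;

	printf("model: %.40s\n", ctrl->mn);

	return 0;
}

The trailing_strtol() change in nvme_blk_probe() is similarly mechanical: for a block device named e.g. "blk#0" (name illustrative), trailing_strtol() returns the trailing 0, and the 1-based namespace id becomes 1.
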
@@ -683,76 +707,39 @@ static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
 			total_lbas -= lbas;
 		}
 
-		if (nvme_setup_prps
-		    (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer))
+		if (nvme_setup_prps(dev, &prp2,
+				    lbas << ns->lba_shift, (ulong)buffer))
 			return -EIO;
 		c.rw.slba = cpu_to_le64(slba);
 		slba += lbas;
 		c.rw.length = cpu_to_le16(lbas - 1);
 		c.rw.prp1 = cpu_to_le64((ulong)buffer);
 		c.rw.prp2 = cpu_to_le64(prp2);
-		status = nvme_submit_sync_cmd(dev->queues[1],
+		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
 				&c, NULL, IO_TIMEOUT);
 		if (status)
 			break;
-		temp_len -= lbas << ns->lba_shift;
+		temp_len -= (u32)lbas << ns->lba_shift;
 		buffer += lbas << ns->lba_shift;
 	}
 
+	if (read)
+		invalidate_dcache_range((unsigned long)buffer,
+					(unsigned long)buffer + total_len);
+
 	return (total_len - temp_len) >> desc->log2blksz;
 }
 
+static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
+			   lbaint_t blkcnt, void *buffer)
+{
+	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
+}
+
 static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
 			    lbaint_t blkcnt, const void *buffer)
 {
-	struct nvme_ns *ns = dev_get_priv(udev);
-	struct nvme_dev *dev = ns->dev;
-	struct nvme_command c;
-	struct blk_desc *desc = dev_get_uclass_platdata(udev);
-	int status;
-	u64 prp2;
-	u64 total_len = blkcnt << desc->log2blksz;
-	u64 temp_len = total_len;
-
-	u64 slba = blknr;
-	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
-	u64 total_lbas = blkcnt;
-
-	c.rw.opcode = nvme_cmd_write;
-	c.rw.flags = 0;
-	c.rw.nsid = cpu_to_le32(ns->ns_id);
-	c.rw.control = 0;
-	c.rw.dsmgmt = 0;
-	c.rw.reftag = 0;
-	c.rw.apptag = 0;
-	c.rw.appmask = 0;
-	c.rw.metadata = 0;
-
-	while (total_lbas) {
-		if (total_lbas < lbas) {
-			lbas = (u16)total_lbas;
-			total_lbas = 0;
-		} else {
-			total_lbas -= lbas;
-		}
-
-		if (nvme_setup_prps
-		    (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer))
-			return -EIO;
-		c.rw.slba = cpu_to_le64(slba);
-		slba += lbas;
-		c.rw.length = cpu_to_le16(lbas - 1);
-		c.rw.prp1 = cpu_to_le64((ulong)buffer);
-		c.rw.prp2 = cpu_to_le64(prp2);
-		status = nvme_submit_sync_cmd(dev->queues[1],
-				&c, NULL, IO_TIMEOUT);
-		if (status)
-			break;
-		temp_len -= lbas << ns->lba_shift;
-		buffer += lbas << ns->lba_shift;
-	}
-
-	return (total_len - temp_len) >> desc->log2blksz;
+	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
 }
 
 static const struct blk_ops nvme_blk_ops = {
@@ -770,8 +757,10 @@ U_BOOT_DRIVER(nvme_blk) = {
 
 static int nvme_bind(struct udevice *udev)
 {
+	static int ndev_num;
 	char name[20];
-	sprintf(name, "nvme#%d", nvme_info->ndev_num++);
+
+	sprintf(name, "nvme#%d", ndev_num++);
 
 	return device_set_name(udev, name);
 }
@@ -792,13 +781,13 @@ static int nvme_probe(struct udevice *udev)
 		goto free_nvme;
 	}
 
-	ndev->queues = malloc(2 * sizeof(struct nvme_queue *));
+	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
 	if (!ndev->queues) {
 		ret = -ENOMEM;
 		printf("Error: %s: Out of memory!\n", udev->name);
 		goto free_nvme;
 	}
-	memset(ndev->queues, 0, sizeof(2 * sizeof(struct nvme_queue *)));
+	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
 
 	ndev->prp_pool = malloc(MAX_PRP_POOL);
 	if (!ndev->prp_pool) {
@@ -822,8 +811,6 @@ static int nvme_probe(struct udevice *udev)
 		goto free_queue;
 
 	nvme_get_info_from_identify(ndev);
-	ndev->blk_dev_start = nvme_info->ns_num;
-	list_add(&ndev->node, &nvme_info->dev_list);
 
 	return 0;
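
One last note, on the nvme_probe() hunk: the old memset() length was sizeof(2 * sizeof(struct nvme_queue *)), which is the size of a size_t expression (8 bytes on LP64), not the size of two pointers (16 bytes), so half of the queue array was left uninitialized; the fix also replaces the magic 2 with NVME_Q_NUM. A standalone illustration in plain C, independent of U-Boot, with identifiers reused for clarity:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct nvme_queue;		/* opaque, as in the driver */
enum { NVME_Q_NUM = 2 };	/* admin + I/O, as in the patch */

int main(void)
{
	struct nvme_queue **queues =
		malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));

	if (!queues)
		return 1;

	/* Old, buggy length: sizeof() of an expression of type size_t. */
	printf("buggy size: %zu\n", sizeof(2 * sizeof(struct nvme_queue *)));
	/* Fixed length: the whole pointer array. */
	printf("fixed size: %zu\n", NVME_Q_NUM * sizeof(struct nvme_queue *));

	memset(queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
	free(queues);

	return 0;
}

With the count coming from the enum, the malloc() and the memset() can no longer drift apart the way the literal 2 and the broken sizeof did.
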