block/nvme: Define INDEX macros to ease code review
Use definitions instead of '0' or '1' indexes. This will also be useful when using multi-queues later.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20200821195359.1285345-5-philmd@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent
0ea45f76eb
commit
73159e52e6
block/nvme.c | 33 +++++++++++++++++----------------
1 file changed, 19 insertions(+), 14 deletions(-)
@@ -103,6 +103,9 @@ typedef volatile struct {
 
 QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
 
+#define INDEX_ADMIN 0
+#define INDEX_IO(n) (1 + n)
+
 struct BDRVNVMeState {
     AioContext *aio_context;
     QEMUVFIOState *vfio;
@@ -531,7 +534,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
     }
     cmd.dptr.prp1 = cpu_to_le64(iova);
 
-    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
         error_setg(errp, "Failed to identify controller");
         goto out;
     }
@@ -555,7 +558,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
 
     cmd.cdw10 = 0;
     cmd.nsid = cpu_to_le32(namespace);
-    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
         error_setg(errp, "Failed to identify namespace");
         goto out;
     }
@@ -644,7 +647,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
         .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
         .cdw11 = cpu_to_le32(0x3),
     };
-    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
         error_setg(errp, "Failed to create io queue [%d]", n);
         nvme_free_queue_pair(q);
         return false;
@@ -655,7 +658,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
         .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
         .cdw11 = cpu_to_le32(0x1 | (n << 16)),
     };
-    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
+    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
         error_setg(errp, "Failed to create io queue [%d]", n);
         nvme_free_queue_pair(q);
         return false;
@@ -739,16 +742,18 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
 
     /* Set up admin queue. */
     s->queues = g_new(NVMeQueuePair *, 1);
-    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
-    if (!s->queues[0]) {
+    s->queues[INDEX_ADMIN] = nvme_create_queue_pair(bs, 0,
+                                                    NVME_QUEUE_SIZE,
+                                                    errp);
+    if (!s->queues[INDEX_ADMIN]) {
         ret = -EINVAL;
         goto out;
     }
     s->nr_queues = 1;
     QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
     s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
-    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
-    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);
+    s->regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
+    s->regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);
 
     /* After setting up all control registers we can enable device now. */
     s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
@@ -839,7 +844,7 @@ static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
         .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
     };
 
-    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
+    ret = nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd);
     if (ret) {
         error_setg(errp, "Failed to configure NVMe write cache");
     }
@@ -1056,7 +1061,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
 {
     int r;
     BDRVNVMeState *s = bs->opaque;
-    NVMeQueuePair *ioq = s->queues[1];
+    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
     NVMeRequest *req;
 
     uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
@@ -1171,7 +1176,7 @@ static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
 static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
 {
     BDRVNVMeState *s = bs->opaque;
-    NVMeQueuePair *ioq = s->queues[1];
+    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
     NVMeRequest *req;
     NvmeCmd cmd = {
         .opcode = NVME_CMD_FLUSH,
@@ -1202,7 +1207,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                               BdrvRequestFlags flags)
 {
     BDRVNVMeState *s = bs->opaque;
-    NVMeQueuePair *ioq = s->queues[1];
+    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
     NVMeRequest *req;
 
     uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
@@ -1255,7 +1260,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                          int bytes)
 {
     BDRVNVMeState *s = bs->opaque;
-    NVMeQueuePair *ioq = s->queues[1];
+    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
     NVMeRequest *req;
     NvmeDsmRange *buf;
     QEMUIOVector local_qiov;
@@ -1398,7 +1403,7 @@ static void nvme_aio_unplug(BlockDriverState *bs)
     BDRVNVMeState *s = bs->opaque;
     assert(s->plugged);
     s->plugged = false;
-    for (i = 1; i < s->nr_queues; i++) {
+    for (i = INDEX_IO(0); i < s->nr_queues; i++) {
         NVMeQueuePair *q = s->queues[i];
         qemu_mutex_lock(&q->lock);
         nvme_kick(q);
Loading…
Reference in a new issue