i = 0;
while (nprps) {
if ((i == (prps_per_page - 1)) && nprps > 1) {
- *(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
- page_size);
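+ /* The last entry chains to the next PRP list page; the controller follows it by physical address */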
+ *(prp_pool + i) = cpu_to_le64(virt_to_phys((void *)prp_pool +
+ page_size));
i = 0;
prp_pool += page_size;
}
*(prp_pool + i++) = cpu_to_le64(dma_addr);
dma_addr += page_size;
nprps--;
}
- *prp2 = (ulong)dev->prp_pool;
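+ /* PRP2 is consumed by the controller, so pass the PRP pool's physical address */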
+ *prp2 = (ulong)virt_to_phys(dev->prp_pool);
flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
num_pages * page_size);
dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
writel(aqa, &dev->bar->aqa);
- nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
- nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);
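+ /* ASQ/ACQ are DMA base registers; they need physical addresses, not CPU virtual ones */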
+ nvme_writeq((ulong)virt_to_phys(nvmeq->sq_cmds), &dev->bar->asq);
+ nvme_writeq((ulong)virt_to_phys(nvmeq->cqes), &dev->bar->acq);
result = nvme_enable_ctrl(dev);
if (result)
goto free_nvmeq;
memset(&c, 0, sizeof(c));
c.create_cq.opcode = nvme_admin_create_cq;
- c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
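+ /* PRP1 carries the completion queue's physical base address */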
+ c.create_cq.prp1 = cpu_to_le64((ulong)virt_to_phys(nvmeq->cqes));
c.create_cq.cqid = cpu_to_le16(qid);
c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(flags);
memset(&c, 0, sizeof(c));
c.create_sq.opcode = nvme_admin_create_sq;
- c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
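+ /* Likewise, PRP1 for the submission queue must be a physical address */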
+ c.create_sq.prp1 = cpu_to_le64((ulong)virt_to_phys(nvmeq->sq_cmds));
c.create_sq.sqid = cpu_to_le16(qid);
c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
c.create_sq.sq_flags = cpu_to_le16(flags);
}
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
- unsigned cns, dma_addr_t dma_addr)
+ unsigned cns, unsigned long addr)
{
struct nvme_command c;
u32 page_size = dev->page_size;
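+ /* Callers now pass a virtual address; derive the DMA (physical) address from it here */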
+ dma_addr_t dma_addr = virt_to_phys((void *)addr);
int offset = dma_addr & (page_size - 1);
int length = sizeof(struct nvme_id_ctrl);
int ret;
c.identify.cns = cpu_to_le32(cns);
- invalidate_dcache_range(dma_addr,
- dma_addr + sizeof(struct nvme_id_ctrl));
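+ /* Cache maintenance works on virtual addresses, so use addr, not dma_addr */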
+ invalidate_dcache_range(addr,
+ addr + sizeof(struct nvme_id_ctrl));
ret = nvme_submit_admin_cmd(dev, &c, NULL);
if (!ret)
- invalidate_dcache_range(dma_addr,
- dma_addr + sizeof(struct nvme_id_ctrl));
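+ /* Invalidate after completion so the CPU sees the data the device wrote via DMA */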
+ invalidate_dcache_range(addr,
+ addr + sizeof(struct nvme_id_ctrl));
return ret;
}
if (!ctrl)
return -ENOMEM;
- ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
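+ /* Pass the virtual address; nvme_identify() performs the translation itself */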
+ ret = nvme_identify(dev, 0, 1, (unsigned long)ctrl);
if (ret) {
free(ctrl);
return -EIO;
ns->dev = ndev;
/* extract the namespace id from the block device name */
ns->ns_id = trailing_strtol(udev->name);
- if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
+ if (nvme_identify(ndev, ns->ns_id, 0, (unsigned long)id)) {
free(id);
return -EIO;
}
u64 prp2;
u64 total_len = blkcnt << desc->log2blksz;
u64 temp_len = total_len;
- uintptr_t temp_buffer = (uintptr_t)buffer;
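+ /* PRP entries describe the transfer buffer by physical address */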
+ uintptr_t temp_buffer = (uintptr_t)virt_to_phys(buffer);
u64 slba = blknr;
u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
char name[20];
memset(id, 0, sizeof(*id));
- if (nvme_identify(ndev, i, 0, (dma_addr_t)(long)id)) {
+ if (nvme_identify(ndev, i, 0, (unsigned long)id)) {
ret = -EIO;
goto free_id;
}