/*
* kernel: 4.5.2
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/interrupt.h> // for irqreturn_t/request_irq (the disabled IRQ path)
#define TEST_PCIE_DEV_NAME "test_pcie"
//#define PCI_VENDOR_ID_XILINX 0x10EE /* already defined in <linux/pci_ids.h>; 0x10EE is the Xilinx Vendor ID */
#define TEST_PCI_DEVICE_ID_XILINX 0x7033 // !!!check against your hardware /* the Xilinx default Device ID is 70<link speed><link width>h */
#define TEST_SSD_DEV_NAME "test_ssd"
#define TEST_SSD_PARTITIONS 1
#define TEST_SSD_MINORS 1
#define TEST_DEV_MEM_SZ (1 << 22) // 4MB
#define KTHREAD_NAME "test_kthread_fn"
#define COHERENT_DMA_BUF_SZ (1 << 22) // !!!4MB; no less than 1MB
#define AXIBAR2PCIEBAR0_OFFSET_U 0x8208
#define AXIBAR2PCIEBAR0_OFFSET_L 0x820c
#define AXIBAR2PCIEBAR1_OFFSET_U 0x8210
#define AXIBAR2PCIEBAR1_OFFSET_L 0x8214
#define CDMA_STAT_OFFSET 0xc004
#define AXI_BAR0 0x80800000
#define AXI_BAR1 0x80000000
#define START_DESCRIPTOR 0xc008
#define END_DESCRIPTOR 0xc010
#define BRAM_AXI_ADDR 0x81000000
#define PCIE_BAR 0x81000000
#define C_BASEADDR 0xC000 // CDMA base address within the BAR, per the hardware design
#define CDMA_CTRL_REG_OFFSET 0x00
#define CDMA_STAT_REG_OFFSET 0x04
#define CDMA_SA_REG_OFFSET 0x18
#define CDMA_DA_REG_OFFSET 0x20
#define CDMA_BTT_REG_OFFSET 0x28
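/*
 * Address map assumed by the code below (a summary, not part of the original):
 * - AXIBAR2PCIEBAR{0,1} are the AXI-to-PCIe address translation registers of
 *   the Xilinx AXI Memory Mapped to PCIe bridge: an AXI master access to
 *   AXI_BAR1 is translated to the host address programmed into them, i.e.
 *   to our coherent DMA buffer.
 * - The CDMA core sits at C_BASEADDR inside the PCIe BAR; a simple-mode
 *   transfer is started by writing SA (source), DA (destination) and finally
 *   BTT (bytes to transfer), then polling the Idle bit of the status register.
 */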
struct io_cmd {
struct bio *bio; // !!!
struct scatterlist *scatlist;
dma_addr_t dma_addr; // used by DMA controller of the device
void *kvaddr; // kernel virtual address, used by kernel and driver, especially to deal with data from userspace(__bio_kmap_atomic)
uint32_t len;
};
struct io_que {
struct bio_list bio_lst; // !!!pending bios; struct bio_list is a singly linked list
struct task_struct *task_s; // consumer kthread
struct io_cmd *io_cmd; // defined above
struct ssd_dev *ssd_dev; // !!!defined below
spinlock_t lock;
uint8_t volatile is_busy; // DMA busy flag (originally unsigned int)
};
struct ssd_dev {
struct pci_dev *pci_dev;
struct gendisk *disk; // linux/genhd.h
void __iomem *pci_bar; // !!!!!!the __iomem annotation is required for MMIO pointers
struct io_que *dev_que; // !!!defined above
};
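/*
 * Overall flow: test_make_request_fn() (producer) appends incoming bios to
 * io_que->bio_lst under the spinlock; test_kthread_fn() (consumer) pops them
 * and moves the data through the single coherent bounce buffer with the CDMA
 * engine. The scatter/gather and IRQ paths below are kept but disabled.
 */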
/*
static void setup_cmd(struct io_cmd *io_cmd, struct bio *bio, struct io_que *dev_que)
{
io_cmd->bio = bio; // !!!!!!save it until test_bio_complete
}
*/
/*
static int setup_scatter_map(struct ssd_dev *ssd_dev, struct io_cmd *io_cmd, unsigned int const phys_segs)
{
void *kvaddr; // volatile struct scatter_region *
dma_addr_t dma_addr;
// !!!!!!returns two values through io_cmd: kvaddr and dma_addr are both set here
kvaddr = dma_alloc_coherent(&ssd_dev->pci_dev->dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC | GFP_DMA);
if (kvaddr == NULL) {
printk("err_dma_pool_alloc\n");
return -ENOMEM;
}
io_cmd->kvaddr = kvaddr;
io_cmd->dma_addr = dma_addr;
io_cmd->len = phys_segs;
return 0;
}
*/
/*
static int setup_scatter_list(struct io_que *dev_que, struct io_cmd *io_cmd, struct bio *bio)
{
//struct ssd_dev *ssd_dev;
struct bio_vec prev_bv, cur_bv; // !!!
struct bvec_iter bvec_iter; // !!!
struct scatterlist *cur_scatlist = NULL;
unsigned int phys_segs, bytes_len = 0;
unsigned char isnt_first_bio_vec = 0u; // !!!
int result = -ENOMEM;
phys_segs = bio_phys_segments(dev_que->ssd_dev->disk->queue, bio); // !!!
io_cmd->scatlist = (struct scatterlist *)kmalloc(sizeof(struct scatterlist) * phys_segs, GFP_ATOMIC | GFP_DMA); // !!!
if (io_cmd->scatlist == NULL) {
printk("err_alloc_scatterlist\n");
goto err_alloc_scatterlist;
}
sg_init_table(io_cmd->scatlist, phys_segs); // !!!lib/scatterlist.c
phys_segs = 0;
memset(&prev_bv, 0, sizeof(struct bio_vec)); // !!!!!!prev_bv need to be initialized
bio_for_each_segment(cur_bv, bio, bvec_iter) { // !!!
if (isnt_first_bio_vec && BIOVEC_PHYS_MERGEABLE(&prev_bv, &cur_bv)) { // !!!BIOVEC_PHYS_MERGEABLE is defined in bio.h
cur_scatlist->length += cur_bv.bv_len; // !!!
} else {
if (isnt_first_bio_vec)
cur_scatlist++;
else
cur_scatlist = io_cmd->scatlist;
sg_set_page(cur_scatlist, cur_bv.bv_page, cur_bv.bv_len, cur_bv.bv_offset); // !!!in <linux/scatterlist.h>
phys_segs++;
}
bytes_len += cur_bv.bv_len; // !!!
prev_bv = cur_bv;
isnt_first_bio_vec = 1u;
}
sg_mark_end(cur_scatlist); // !!!<linux/scatterlist.h>
//ssd_dev = dev_que->ssd_dev;
result = dma_map_sg(&dev_que->ssd_dev->pci_dev->dev, io_cmd->scatlist, phys_segs,
bio_data_dir(io_cmd->bio) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE); // !!!maps the scatterlist for streaming DMA; returns the number of mapped entries, 0 on failure
if (result == 0) {
printk("err_dma_map_sg\n");
goto err_dma_map_sg;
}
result = setup_scatter_map(dev_que->ssd_dev, io_cmd, phys_segs); // above
if (result)
goto err_setup_scatter_map;
bio->bi_iter.bi_sector += (sector_t)(bytes_len >> 9); // !!!advance the sector cursor manually; the kernel does not update it for us here
bio->bi_iter.bi_idx = bvec_iter.bi_idx; // !!!
return 0;
err_setup_scatter_map:
dma_unmap_sg(&dev_que->ssd_dev->pci_dev->dev, io_cmd->scatlist, phys_segs,
bio_data_dir(io_cmd->bio) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
printk("err_setup_scatter_map\n");
err_dma_map_sg:
kfree(io_cmd->scatlist);
err_alloc_scatterlist:
return -ENOMEM;
}
*/
/*
static void submit_cmd(struct io_que *dev_que) // !!!actually starts a DMA transfer
{
dma_addr_t rq_dma_addr;
struct ssd_dev *ssd_dev;
ssd_dev = dev_que->ssd_dev;
rq_dma_addr = dev_que->io_cmd->dma_addr;
dev_que->is_busy = 1; // !!!!!!busy flag
}
*/
/*
static int make_bio_request(struct io_que *io_que, struct bio *bio)
{
int result = -EBUSY;
setup_cmd(io_que->io_cmd, bio, io_que); // above, has modified io_que->io_cmd
result = setup_scatter_list(io_que, io_que->io_cmd, bio); // above
if (result) {
printk("err_setup_scatter_list\n");
goto err_setup_scatter_list;
}
submit_cmd(io_que); // above
return 0;
err_setup_scatter_list:
return -ENOMEM;
}
*/
static void test_process_bio(struct io_que *io_que, struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
unsigned int bytes = bio->bi_iter.bi_sector << 9; // byte offset into the device memory
unsigned int const dir = bio_data_dir(bio);
void * const kvaddr = io_que->io_cmd->kvaddr; // get it
dma_addr_t const dma_addr = io_que->io_cmd->dma_addr; // get it
unsigned int dbg_var = 0;
//printk("axi bar1 high 32bits is %x\n",
// readl((unsigned char *)io_que->ssd_dev->pci_bar + AXIBAR2PCIEBAR1_OFFSET_U));
//printk("axi bar1 low 32 bits is %x\n",
// readl((unsigned char *)io_que->ssd_dev->pci_bar + AXIBAR2PCIEBAR1_OFFSET_L));
//!!!printk("this bio has %d segs\n", bio_phys_segments(io_que->ssd_dev->disk->queue, bio)); // !!!
bio_for_each_segment(bvec, bio, iter) {
void *buffer = __bio_kmap_atomic(bio, iter);
unsigned int cur_bv_len = bio_cur_bytes(bio);
unsigned int cnt = 0;
//printk("No.%useg begin\n", dbg_var);
//printk("bytes=%u, cur_bv_len=%u\n", bytes, cur_bv_len);
if (bytes + cur_bv_len > TEST_DEV_MEM_SZ) // !!!would run past the 4 MB device memory
printk("beyond-end rd/wr\n");
if (dir == WRITE) {
memcpy(kvaddr, buffer, cur_bv_len); // stage the segment in the coherent bounce buffer
writel((unsigned long)/*dma_addr*/AXI_BAR1,
(unsigned char *)io_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_SA_REG_OFFSET); // SA is the mapped AXI_BAR(map to dma_addr before)
writel(/*AXI_BAR1*/0x400000 + bytes,
(unsigned char *)io_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_DA_REG_OFFSET); // DA is DDR
writel(cur_bv_len,
(unsigned char *)io_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_BTT_REG_OFFSET); // BTT
while (((readl((unsigned char *)io_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_STAT_REG_OFFSET) >> 1) & 0x1) == 0x0) {
cnt++;
if (cnt > 10000) {
printk("couldn't wait to idle\n");
break;
}
}
} else if (dir == READ) {
writel(0x400000 + bytes, (unsigned char *)io_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_SA_REG_OFFSET); // SA
writel((unsigned long)AXI_BAR1, (unsigned char *)io_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_DA_REG_OFFSET); // DA
writel(cur_bv_len, (unsigned char *)io_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_BTT_REG_OFFSET); // BTT
while (((readl((unsigned char *)io_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_STAT_REG_OFFSET) >> 1) & 0x1) == 0x0) {
cnt++;
if (cnt > 10000) {
printk("couldn't wait to idle\n");
break;
}
}
memcpy(buffer, kvaddr, cur_bv_len); // copy from the bounce buffer back into the bio page
}
//printk("after transfer CDMA_STAT is %x\n", readl((unsigned char *)io_que->ssd_dev->pci_bar + CDMA_STAT_OFFSET)); // !!!
bytes += cur_bv_len;
__bio_kunmap_atomic(buffer);
//printk("No.%useg end\n", dbg_var);
dbg_var++;
}
bio_endio(bio); // !!!
}
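/*
 * A minimal sketch of the simple-mode transfer sequence used above, factored
 * into a helper. A hypothetical refactoring, not part of the original driver:
 *
 * static int cdma_simple_xfer(void __iomem *pci_bar, u32 sa, u32 da, u32 btt)
 * {
 *         unsigned int cnt = 0;
 *         writel(sa, (unsigned char *)pci_bar + C_BASEADDR + CDMA_SA_REG_OFFSET);
 *         writel(da, (unsigned char *)pci_bar + C_BASEADDR + CDMA_DA_REG_OFFSET);
 *         writel(btt, (unsigned char *)pci_bar + C_BASEADDR + CDMA_BTT_REG_OFFSET); // writing BTT starts the transfer
 *         while (((readl((unsigned char *)pci_bar + C_BASEADDR + CDMA_STAT_REG_OFFSET) >> 1) & 0x1) == 0x0)
 *                 if (++cnt > 10000)
 *                         return -ETIMEDOUT; // engine never returned to idle
 *         return 0;
 * }
 */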
/*
static void free_scatter_map(struct ssd_dev *ssd_dev, struct io_cmd *io_cmd)
{
dma_unmap_sg(&ssd_dev->pci_dev->dev, io_cmd->scatlist, io_cmd->len,
bio_data_dir(io_cmd->bio) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE); // !!!
dma_free_coherent(&ssd_dev->pci_dev->dev, PAGE_SIZE, io_cmd->kvaddr,
io_cmd->dma_addr);
kfree(io_cmd->scatlist);
}
*/
/*
static void test_bio_complete(struct ssd_dev *ssd_dev, struct io_que *dev_que) // !!!???logic wrong?
{
struct bio *bio;
struct io_cmd *io_cmd;
io_cmd = dev_que->io_cmd;
free_scatter_map(ssd_dev, io_cmd); // above
bio = io_cmd->bio; // !!!has been saved before
if (bio->bi_vcnt == bio->bi_iter.bi_idx)
bio_endio(bio); // !!!
dev_que->is_busy = 0; // !!!not busy
if (bio_list_peek(&dev_que->bio_lst))
wake_up_process(dev_que->task_s);
}
*/
// !!!consumer: bound to the kthread created in alloc_kthread() below
static int test_kthread_fn(void *data)
{
struct io_que *dev_que;
struct bio *bio;
dev_que = (struct io_que *)data;
if (dev_que == NULL) {
printk("in test_kthread_fn dev_que is null!\n");
return -EINVAL; // !!!don't dereference a NULL queue below
}
do {
//struct bio *bio;
//if (dev_que->is_busy) // !!!!!!DMA channel is busy
//goto sleep_this_thread;
if (bio_list_peek(&dev_que->bio_lst)) {
spin_lock(&dev_que->lock);
bio = bio_list_pop(&dev_que->bio_lst); // !!!!!!get bio
spin_unlock(&dev_que->lock);
//printk("test_kthread_fn: get a bio\n");
/*if (make_bio_request(dev_que, bio)) { // above
spin_lock(&dev_que->lock);
bio_list_add_head(&dev_que->bio_lst, bio); // add from head
spin_unlock(&dev_que->lock);
}*/
test_process_bio(dev_que, bio); // !!!!!!
}
//test_bio_complete(ssd_dev, dev_que); // originally called here; this compiled even with the function disabled because the call itself is commented out
//sleep_this_thread:
schedule(); // !!!yield the CPU; without this the polling loop would monopolize a core
} while (!kthread_should_stop()); // !!!kthread.c
return 0;
}
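/*
 * The loop above busy-polls and relies on schedule() to stay fair. A sketch
 * of the usual sleep/wake pattern instead (assumes the producer calls
 * wake_up_process(dev_que->task_s) after queueing a bio):
 *
 * while (!kthread_should_stop()) {
 *         set_current_state(TASK_INTERRUPTIBLE);
 *         if (bio_list_empty(&dev_que->bio_lst))
 *                 schedule(); // sleep until woken by the producer
 *         __set_current_state(TASK_RUNNING);
 *         ... pop and process bios ...
 * }
 */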
/*
static irqreturn_t irq_handler(int irq, void *dev_id) // !!!
{
struct ssd_dev *ssd_dev;
struct io_que *dev_que;
printk("irq_handler\n");
dev_que = (struct io_que *)dev_id; // !!!
ssd_dev = dev_que->ssd_dev; // !!!
//spin_lock_irq(&dev_que->lock);
test_bio_complete(ssd_dev, dev_que); // !!!above
//spin_unlock_irq(&dev_que->lock);
return IRQ_HANDLED;
}
*/
static int alloc_kthread(struct io_que *dev_que) // !!!create consumer and make it run
{
dev_que->task_s = kthread_run(&test_kthread_fn, dev_que, KTHREAD_NAME); // !!!kthread.h kthread.c
if (IS_ERR(dev_que->task_s)) { /* def in <linux/err.h> */
printk("err: kthread_run\n");
return PTR_ERR(dev_que->task_s);
} else
return 0;
}
static void free_kthread(struct io_que *dev_que)
{
if (kthread_stop(dev_que->task_s) == 0) // kthread.c, struct task_struct *
printk("threadfn has returned\n");
}
// !!!producer: bound as the queue's make_request_fn below; it only appends the bio to the bio_list. blk_qc_t is unsigned int
static blk_qc_t test_make_request_fn(struct request_queue *queue, struct bio *bio)
{
struct io_que *dev_que;
dev_que = (struct io_que *)queue->queuedata; // !!!
spin_lock(&dev_que->lock);
bio_list_add(&dev_que->bio_lst, bio); // !!!add at the tail
spin_unlock(&dev_que->lock);
//printk("test_make_request_fn: add a bio\n");
return BLK_QC_T_NONE; // make_request_fn must return blk_qc_t on 4.4+ kernels
}
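/*
 * Note: with the sleeping consumer sketched above, the producer would also
 * call wake_up_process(dev_que->task_s) here after dropping the lock.
 */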
// !!!!!!ssd_dev and its gendisk have already been allocated.
static struct io_que *alloc_io_que(struct ssd_dev *ssd_dev)
{
struct io_que *dev_que; // const
dev_que = (struct io_que *)kmalloc(sizeof(struct io_que), GFP_KERNEL); // !!!
if (dev_que == NULL) {
printk("err_alloc_dev_que\n");
goto err_alloc_dev_que;
}
ssd_dev->dev_que = dev_que; // !!!!!!
dev_que->ssd_dev = ssd_dev; // !!!!!!
spin_lock_init(&dev_que->lock); // only for init
bio_list_init(&dev_que->bio_lst); // only for init, struct bio_list, bl->head = bl->tail = NULL; comes before consumer!!!
dev_que->is_busy = 0; // !!!only for init
dev_que->io_cmd = (struct io_cmd *)kmalloc(sizeof(struct io_cmd), GFP_KERNEL); // !!!!!!
if (dev_que->io_cmd == NULL) {
printk("err_alloc_io_cmd\n");
goto err_alloc_io_cmd;
}
dev_que->io_cmd->kvaddr = dma_alloc_coherent(&dev_que->ssd_dev->pci_dev->dev, COHERENT_DMA_BUF_SZ,
&dev_que->io_cmd->dma_addr, GFP_ATOMIC | GFP_DMA); // !!!!!!4MB
if (dev_que->io_cmd->kvaddr == NULL) {
printk("in alloc_io_que: err_dma_pool_alloc\n");
goto err_dma_alloc_coherent;
}
writel((unsigned long)dev_que->io_cmd->dma_addr,
(unsigned char *)dev_que->ssd_dev->pci_bar + AXIBAR2PCIEBAR1_OFFSET_L); // !!!!!!translate AXI_BAR1 to dma_addr (the fixed bounce buffer)
writel((unsigned long)(dev_que->io_cmd->dma_addr >> 32),
(unsigned char *)dev_que->ssd_dev->pci_bar + AXIBAR2PCIEBAR1_OFFSET_U);
printk("before trans stat_reg: %x\n", readl((unsigned char *)dev_que->ssd_dev->pci_bar + C_BASEADDR + CDMA_STAT_REG_OFFSET));
if (alloc_kthread(dev_que)) { // !!!!!!consumer comes before producer
printk("err: alloc_kthread\n");
goto err_alloc_kthread;
}
dev_que->ssd_dev->disk->queue = blk_alloc_queue(GFP_KERNEL); // !!!!!!
if (dev_que->ssd_dev->disk->queue == NULL) {
printk("err: blk_alloc_queue\n");
goto err_blk_alloc_queue;
}
dev_que->ssd_dev->disk->queue->queuedata = dev_que; // !!!void *queuedata, point to itself
dev_que->ssd_dev->disk->queue->queue_flags = QUEUE_FLAG_DEFAULT; // the default flag set is needed
//queue_flag_set(QUEUE_FLAG_NOMERGES, dev_que->ssd_dev->disk->queue); /* disable merge attempts */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, dev_que->ssd_dev->disk->queue); /* non-rotational device (SSD); the _unlocked variant avoids the queue_lock lockdep assertion */
blk_queue_make_request(dev_que->ssd_dev->disk->queue, &test_make_request_fn); // !!!bind make_request_fn (the producer) to the queue
return dev_que;
err_blk_alloc_queue:
free_kthread(dev_que);
err_alloc_kthread:
dma_free_coherent(&dev_que->ssd_dev->pci_dev->dev, COHERENT_DMA_BUF_SZ, dev_que->io_cmd->kvaddr, dev_que->io_cmd->dma_addr); // !!!
err_dma_alloc_coherent:
kfree(dev_que->io_cmd);
err_alloc_io_cmd:
kfree(dev_que);
err_alloc_dev_que:
return NULL;
}
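/* Teardown runs in reverse: shut down the queue (producer) before stopping
 * the kthread (consumer), so no bio is queued after its consumer has exited. */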
static void free_io_que(struct ssd_dev *ssd_dev, struct io_que *dev_que)
{
blk_cleanup_queue(dev_que->ssd_dev->disk->queue);
free_kthread(dev_que);
dma_free_coherent(&dev_que->ssd_dev->pci_dev->dev, COHERENT_DMA_BUF_SZ, dev_que->io_cmd->kvaddr, dev_que->io_cmd->dma_addr); // !!!
kfree(dev_que->io_cmd);
kfree(dev_que);
}
static int test_ssd_open(struct block_device *bdev, fmode_t mode)
{
//printk("test_ssd_open\n");
return 0;
}
static void test_ssd_release(struct gendisk *disk, fmode_t mode)
{
//printk("test_ssd_release\n");
}
static struct block_device_operations ssd_fops = {
.open = &test_ssd_open,
.release = &test_ssd_release,
.owner = THIS_MODULE,
};
static int blkdev_major;
static int test_ssd_init(struct ssd_dev *ssd_dev) // !!!
{
struct io_que *dev_que;
int result = -ENOMEM;
printk("blkdev init begin\n");
blkdev_major = register_blkdev(0, TEST_SSD_DEV_NAME); // !!!try to allocate any unused major number.
if (blkdev_major < 0) {
printk("err: register_blkdev\n");
goto err_register_blkdev;
}
ssd_dev->disk = alloc_disk(TEST_SSD_PARTITIONS); // !!!
if (ssd_dev->disk == NULL) {
printk("err: alloc_disk\n");
result = -ENOMEM;
goto err_alloc_disk;
}
ssd_dev->disk->major = blkdev_major;
ssd_dev->disk->first_minor = 0; // !!!!!!
ssd_dev->disk->minors = TEST_SSD_MINORS;
snprintf(ssd_dev->disk->disk_name, DISK_NAME_LEN, "%s", TEST_SSD_DEV_NAME); // !!!DISK_NAME_LEN is 32
ssd_dev->disk->fops = &ssd_fops;
ssd_dev->disk->private_data = ssd_dev; // !!!
//ssd_dev->disk->driverfs_dev = &ssd_dev->pci_dev->dev; // genhd.h: struct device *driverfs_dev; // FIXME: remove
set_capacity(ssd_dev->disk, TEST_DEV_MEM_SZ >> 9); // capacity is in 512-byte sectors, independent of the logical block size
dev_que = alloc_io_que(ssd_dev); // !!!above, set ssd_dev->disk->queue
if (dev_que == NULL) {
printk("err: alloc_io_que\n");
result = -ENOMEM;
goto err_alloc_io_que;
}
add_disk(ssd_dev->disk); // !!!!!!
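// add_disk() makes the disk live immediately: the queue, capacity and fops
// must all be set up before this call.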
// "ssd_dev->pci_dev->irq" init in pci_enable_msi func. dev_que is for param of isr
//if (request_irq(ssd_dev->pci_dev->irq, &irq_handler,
//IRQF_NOBALANCING | IRQF_SHARED, ssd_dev->disk->disk_name, dev_que) < 0) {
//printk("err_request_irq\n");
//goto err_request_irq;
//}
printk("blkdev init end\n");
return 0;
//err_request_irq:
err_alloc_io_que:
del_gendisk(ssd_dev->disk); // !!!
put_disk(ssd_dev->disk);
err_alloc_disk:
unregister_blkdev(blkdev_major, TEST_SSD_DEV_NAME); // !!!
err_register_blkdev:
return result;
}
static int test_ssd_remove(struct ssd_dev *ssd_dev)
{
struct io_que *dev_que;
printk("test_ssd_remove begin\n");
dev_que = ssd_dev->dev_que;
//free_irq(ssd_dev->pci_dev->irq, dev_que);
free_io_que(ssd_dev, dev_que);
del_gendisk(ssd_dev->disk);
put_disk(ssd_dev->disk);
unregister_blkdev(blkdev_major, TEST_SSD_DEV_NAME); // !!!
printk("test_ssd_remove end\n");
return 0;
}
static struct pci_device_id test_id_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_XILINX, TEST_PCI_DEVICE_ID_XILINX), },
{0,},
};
MODULE_DEVICE_TABLE(pci, test_id_tbl);
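/*
 * CDMA register bits checked below (per the Xilinx AXI CDMA product guide,
 * PG034): control register bit 3 = SGMode (0: simple mode), bit 2 = reset in
 * progress; status register bit 1 = Idle.
 */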
static void cdma_init_simple(void __iomem *pci_bar)
{
u32 read_val; // readl() returns a 32-bit value
// default simple mode
read_val = readl((unsigned char *)pci_bar + C_BASEADDR + CDMA_CTRL_REG_OFFSET);
if ((read_val >> 3) & 0x1)
printk("it's sg mode\n");
else
printk("it's simple mode\n");
if ((read_val >> 2) & 0x1)
printk("reset in progress\n");
else
printk("normal operation\n");
read_val = readl((unsigned char *)pci_bar + C_BASEADDR + CDMA_STAT_REG_OFFSET);
if ((read_val >> 1) & 0x1)
printk("cdma is idle\n");
else
printk("cdma is not idle\n");
}
static int bars;
static int test_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) // !!!
{
struct ssd_dev *ssd_dev;
resource_size_t res_start, res_len; // an integer type (typedef of phys_addr_t)
int result = -ENOMEM;
printk("pci_driver_probe begin with vendor=%x, device=%x\n", id->vendor, id->device);
ssd_dev = (struct ssd_dev *)kmalloc(sizeof(struct ssd_dev), GFP_KERNEL); // !!!!!!
if (ssd_dev == NULL) {
printk("err: kmalloc ssd_dev\n");
goto err_kmalloc_ssd_dev;
}
ssd_dev->pci_dev = pci_dev; // !!!!!!
if (pci_enable_device(pci_dev) < 0) { // !!!
printk("err: pci_enable_device\n");
goto err_pci_enable_device;
}
pci_set_master(pci_dev); // !!!enables bus-mastering for device dev
//bars = pci_select_bars(pci_dev, IORESOURCE_MEM); // !!!<linux/ioport.h>
bars = 0; // !!!!!!only BAR 0 is used here; the BAR layout is fixed by the hardware design
if (pci_request_selected_regions(pci_dev, bars, TEST_PCIE_DEV_NAME)) { // actually using __request_mem_region
printk("err: pci_request_selected_regions\n");
goto err_pci_request_selected_regions;
}
res_start = pci_resource_start(pci_dev, bars);
res_len = pci_resource_len(pci_dev, bars);
printk("pci_res_start=%lu, pci_res_len=%lu\n", (unsigned long)res_start, (unsigned long)res_len);
//request_mem_region(pci_resource_start(pci_dev, bars), pci_resource_len(pci_dev, bars), TEST_PCIE_DEV_NAME);
/* !!!associate with drivers/pci/msi.c, using pci_enable_msi_range,
* updates the @dev's irq member to the lowest new interrupt number;
*/
//if (pci_enable_msi(pci_dev) < 0)
//printk("pci_enable_msi: an error occurs\n");
//ssd_dev->irq = pci_dev->irq; // !!!!!!
pci_set_drvdata(pci_dev, ssd_dev); // !!!bind ssd_dev to pci_dev, for later use
//if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64)) < 0) // if return err
//printk("err: pci_set_dma_mask\n");
//if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)) < 0) // both needed
//printk("pci_set_consistent_dma_mask err\n");
// !!!!!!map BAR `bars` (BAR 0), not a hard-coded 0; the returned pointer must be __iomem
ssd_dev->pci_bar = ioremap(res_start, res_len); // !!!
if (ssd_dev->pci_bar == NULL) {
printk("err: ioremap\n");
goto err_ioremap;
}
//writel(0x0, (unsigned char *)ssd_dev->pci_bar + 0xc000); // !!!!!!set the CDMA control register to simple mode
//if ((readl((unsigned char *)ssd_dev->pci_bar + 0xc000) >> 3) & 0x1)
//printk("CDMA CTRL settings err: not in simple mode\n");
//printk("CDMA STATUS is %x\n", readl((unsigned char *)ssd_dev->pci_bar + 0xc004)); // !!!
cdma_init_simple(ssd_dev->pci_bar);
printk("pci bus init has successfully ended\n");
if (test_ssd_init(ssd_dev)) { // above
printk("err_test_ssd_init\n");
goto err_ssd_init;
}
printk("pci_driver_probe end\n");
return 0;
err_ssd_init:
iounmap(ssd_dev->pci_bar);
err_ioremap:
pci_set_drvdata(pci_dev, NULL); // !!!cleared here, before the regions are released (mirrors probe order)
//pci_disable_msi(pci_dev);
pci_release_selected_regions(pci_dev, bars); // !!!
err_pci_request_selected_regions:
pci_clear_master(pci_dev); // !!!
pci_disable_device(pci_dev);
err_pci_enable_device:
kfree(ssd_dev);
err_kmalloc_ssd_dev:
return result;
}
static void test_remove(struct pci_dev *pci_dev)
{
struct ssd_dev *ssd_dev;
printk("pci_driver_remove begin\n");
ssd_dev = (struct ssd_dev *)pci_get_drvdata(pci_dev); // has been binded before
test_ssd_remove(ssd_dev); // above
iounmap(ssd_dev->pci_bar); // !!!
pci_set_drvdata(pci_dev, NULL);
//pci_disable_msi(pci_dev);
pci_release_selected_regions(pci_dev, bars); // !!!original: pci_release_regions(pci_dev);
pci_clear_master(pci_dev);
pci_disable_device(pci_dev);
kfree(ssd_dev);
printk("pci_driver_remove end\n");
}
static struct pci_driver pci_driver_inst = {
.name = TEST_PCIE_DEV_NAME,
.id_table = test_id_tbl,
.probe = &test_probe,
.remove = &test_remove,
};
static int __init test_module_init(void)
{
int result;
printk("module_init_fn begin\n");
result = pci_register_driver(&pci_driver_inst);
if (result) {
printk("err_register_driver\n");
return result; // propagate the real error instead of a fixed -EBUSY
}
printk("module_init_fn end\n");
return 0;
}
module_init(test_module_init);
static void __exit test_module_exit(void)
{
printk("module_exit_fn begin\n");
pci_unregister_driver(&pci_driver_inst);
printk("module_exit_fn end\n");
}
module_exit(test_module_exit);
MODULE_LICENSE("GPL"); // !!!