
#include <linux/time.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/random.h>
#include "altera_dma_cmd.h"
#include <linux/unistd.h>
#include <linux/version.h>
#define P40_FMT "P40DMA:%s(): "
#define PCIE40_DMA_CLASS "pcie40_dma"
#include "altera_dma.h"
#define TIMEOUT 0x2000000


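/*
 * ioctl entry point for the DMA character device.
 * ALTERA_IOCX_START runs one DMA write via dma_test() and then blocks until
 * the status flag is cleared; ALTERA_CMD_WAIT_DMA is currently a no-op
 * (its wait_event_interruptible() call is commented out below).
 */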
static long altera_dma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int rc = -2;
    struct pcie40_dma_state *bk_ptr = filp->private_data;

    switch (cmd) {
        case ALTERA_IOCX_START:
            rc = dma_test(bk_ptr, bk_ptr->pci_dev);
            wait_event_interruptible(bk_ptr->wait_q, !atomic_read(&bk_ptr->status));
            break;
        case ALTERA_CMD_WAIT_DMA:
            //  wait_event_interruptible(bk_ptr->wait_q, !atomic_read(&bk_ptr->status));
            break;
    }

    return rc;
}


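/*
 * Map the root-port write buffer (rp_wr_buffer_virt_addr, allocated in
 * altera_pci_probe()) into user space, so the application can read the data
 * written by the DMA engine without an extra copy.
 */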
static int altera_dma_mmap(struct file *file, struct vm_area_struct *vma)
{
    unsigned long mem_addr;
    unsigned long size;
    struct pcie40_dma_state *bk_ptr;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(3,6,999)
    vma->vm_flags |= VM_RESERVED;
#else
    vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
#endif

    size = vma->vm_end - vma->vm_start;

    bk_ptr = file->private_data;

    mem_addr = virt_to_phys(bk_ptr->rp_wr_buffer_virt_addr);

    printk(KERN_DEBUG "map vma %p pfn %lX vm_start %lX vm_end %lX size %lX\n", vma,
           (mem_addr >> PAGE_SHIFT), vma->vm_start, vma->vm_end, size);

    if (remap_pfn_range(vma, vma->vm_start, (mem_addr >> PAGE_SHIFT), size, vma->vm_page_prot)) {
        printk(P40_ERR "remap_pfn_range() failed\n", P40_PARM);
        return -EAGAIN;
    }
    return 0;
}
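/*
 * Recover the per-device state from the cdev, attach it to this file handle
 * and remember the pid of the opening process.
 */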
int altera_dma_open(struct inode *inode, struct file *file) {
    struct pcie40_dma_state *bk_ptr = NULL;

    bk_ptr = container_of(inode->i_cdev, struct pcie40_dma_state, cdev);
    file->private_data = bk_ptr;
    bk_ptr->user_pid = current->pid;

    return 0;
}

int altera_dma_release(struct inode *inode, struct file *file) {
    return 0;
}

/*static irqreturn_t dma_isr(int irq, void *dev_id)
{
 
  struct pcie40_dma_state *bk_ptr = (struct pcie40_dma_state* )dev_id;
  if (!bk_ptr){ 
    printk(KERN_ALERT "bk_ptr lost \n");
    return IRQ_NONE;
  }
 atomic_set(&bk_ptr->status, 0);
  bk_ptr->irq_count++;
  if (bk_ptr->irq_count%1000) printk(KERN_ALERT "IRQ count %d\n", bk_ptr->irq_count++);
   wake_up(&bk_ptr->wait_q);
  return IRQ_HANDLED;
} 
*/
struct file_operations altera_dma_fops = {
    .owner          = THIS_MODULE,
    .mmap           = altera_dma_mmap,
    .open           = altera_dma_open,
    .release        = altera_dma_release,
    .unlocked_ioctl = altera_dma_ioctl,
};
static struct class *pcie40_dma_class = NULL;
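/*
 * Allocate a char device region (one minor) and register the device node
 * through pcie40_setup_cdev() in the pcie40_dma class.
 */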
static int __init init_chrdev (struct pcie40_dma_state *bk_ptr) {
    int dev_minor = 0;
    int dev_major = 0;
    int devno = 0;
    int rc = -1;

    rc = alloc_chrdev_region(&bk_ptr->cdevno, dev_minor, 1, ALTERA_DMA_DEVFILE);
    if (rc < 0) {
        printk(P40_ERR "alloc_chrdev_region() failed\n", P40_PARM);
        goto err_alloc_chrdev_region;
    }
    dev_major = MAJOR(bk_ptr->cdevno);
    if (dev_major < 0) {
        printk(P40_ERR "cannot get major ID %d\n", P40_PARM, dev_major);
    }

    devno = MKDEV(dev_major, dev_minor);

    rc = pcie40_setup_cdev(pcie40_dma_class, &(bk_ptr->cdev), devno, dev_minor, 0, ALTERA_DMA_DEVFILE, bk_ptr->common->dev_id, &altera_dma_fops);
    if (rc < 0) {
        goto err_bar0_dev;
    }
    return 0;

err_bar0_dev:
    unregister_chrdev_region(bk_ptr->cdevno, 1);
err_alloc_chrdev_region:
    return rc;
}

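/*
 * Fill one write descriptor of the DMA descriptor table: 'source' is the
 * endpoint (FPGA) address, 'dest' the host bus address.  Fields are stored
 * little-endian and the descriptor id is packed into bits 18+ of the
 * control/length word.
 */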
static int set_write_desc(struct dma_descriptor *wr_desc, u64 source, dma_addr_t dest, u32 ctl_dma_len, u32 id)
{
    wr_desc->src_addr_ldw = cpu_to_le32(source & 0xffffffffUL);
    wr_desc->src_addr_udw = cpu_to_le32((source >> 32));
    wr_desc->dest_addr_ldw = cpu_to_le32(dest & 0xffffffffUL);
    wr_desc->dest_addr_udw = cpu_to_le32((dest >> 32));
    wr_desc->ctl_dma_len = cpu_to_le32(ctl_dma_len | (id << 18));
    wr_desc->reserved[0] = cpu_to_le32(0x0);
    wr_desc->reserved[1] = cpu_to_le32(0x0);
    wr_desc->reserved[2] = cpu_to_le32(0x0);
    return 0;
}



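/*
 * Debug helper (not called in this file): fill endpoint memory behind BAR4
 * with a counter pattern - 'cpt' in the first dword, then the dword index.
 * Note that the 'init_value' argument is currently unused.
 */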
static int init_ep_mem(struct pcie40_dma_state *bk_ptr, u32 mem_base, u32 num_dwords, u32 init_value, u32 cpt)
{
    u32 i = 0;
    /* iowrite32() already performs the CPU-to-little-endian conversion */
    iowrite32(cpt, (u32 *)(bk_ptr->bar[4] + mem_base));
    for (i = 1; i < num_dwords; i++) {
        iowrite32(i, (u32 *)(bk_ptr->bar[4] + mem_base) + i);
        wmb();
    }

    return 0;
}

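/*
 * Debug helper (not called in this file): sample every 128th dword of
 * endpoint memory behind BAR4 and print it next to the corresponding
 * host buffer address.
 */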
static int ep_read(u8 *virt_addr, struct pcie40_dma_state *bk_ptr, u32 mem_base, u32 num_dwords)
{
    u32 i = 0;
    u32 ep_data = 0;

    //printk(KERN_DEBUG "RP                      EP");
    for (i = 0; i < num_dwords; i += 128) {
        ep_data = ioread32((u32 *)(bk_ptr->bar[4] + mem_base) + i);
        rmb();

        printk(KERN_DEBUG "%p: 0x%08x ", (u32 *)virt_addr + i, ep_data);
    }
    return 0;
}


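/* Clear all 128 per-descriptor status flags of the DMA write-back header. */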
static int set_lite_table_header(struct lite_dma_header *header)
{
    int i;
    for (i = 0; i < 128; i++)
        header->flags[i] = cpu_to_le32(0x0); 
    return 0;
}

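/*
 * Run one DMA write: advance the controller's last-descriptor pointer by
 * altera_dma_descriptor_num (wrapping around the table) and poll the
 * write-back status flag of the final descriptor until it is set or the
 * software timeout expires.  Returns 1 on completion, -1 on timeout.
 */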
static int dma_test(struct pcie40_dma_state *bk_ptr, struct pci_dev *dev)
{
    u8 *rp_wr_buffer_virt_addr = bk_ptr->rp_wr_buffer_virt_addr;
    u32 last_id, write_127;
    u32 timeout;
    void __iomem *base_bar0;

    base_bar0 = bk_ptr->common->bar0_regs;
    atomic_set(&bk_ptr->status, 1);
    /*         printk(KERN_DEBUG " before read at 0x50120 %x \n" ,pcie40_read32_bar2 (bk_ptr->common,0x00));
      pcie40_write32_bar2 (bk_ptr->common,0x00,0x1FF);
     	wmb();
        if ( pcie40_wait_data(bk_ptr->common) == 0 ) {
           atomic_set(&bk_ptr->status, 0);
           return -2; // no data in fifo
	 }
    */

    //    iowrite32 (0, base_bar0+DESC_CTRLLER_BASE+ALTERA_LITE_DMA_WR_CONTROL);// for MSI
    timeout = TIMEOUT;
    write_127 = 0;
    last_id = ioread32((u32 *)(base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_LAST_PTR));

    /* clear the write-back status flags before starting */
    set_lite_table_header((struct lite_dma_header *)bk_ptr->lite_table_wr_cpu_virt_addr);

    /* 0xFF presumably means no descriptor has been processed yet: start from the end of the table */
    if (last_id == 0xFF)
        last_id = bk_ptr->dma_status.altera_dma_descriptor_num - 1;

    /* advance the pointer by one full set of descriptors, wrapping around the table */
    last_id = last_id + bk_ptr->dma_status.altera_dma_descriptor_num;

    if (last_id > (bk_ptr->dma_status.altera_dma_descriptor_num - 1)) {
        last_id = last_id - bk_ptr->dma_status.altera_dma_descriptor_num;
        if ((bk_ptr->dma_status.altera_dma_descriptor_num > 1) && (last_id != (bk_ptr->dma_status.altera_dma_descriptor_num - 1)))
            write_127 = 1;
    }

    /* when wrapping past the end of the table, first point the controller at the last slot */
    if (write_127)
        iowrite32((bk_ptr->dma_status.altera_dma_descriptor_num - 1), base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_LAST_PTR);

    // Start DMA: writing the new last pointer kicks the descriptor controller
    iowrite32(last_id, base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_LAST_PTR);

    while (1) {
        /* the controller sets this flag once the last descriptor has completed */
        if (bk_ptr->lite_table_wr_cpu_virt_addr->header.flags[last_id]) {
            break;
        }

        if (timeout == 0) {
            memset(rp_wr_buffer_virt_addr, 0, bk_ptr->dma_status.altera_dma_num_dwords * 4);
            bk_ptr->dma_status.write_eplast_timeout = 1;
            printk(KERN_DEBUG "Write DMA timed out\n");
            printk(KERN_DEBUG "DWORD = %08x\n", bk_ptr->dma_status.altera_dma_num_dwords);
            printk(KERN_DEBUG "Desc = %08x\n", bk_ptr->dma_status.altera_dma_descriptor_num);
            atomic_set(&bk_ptr->status, 0);
            return -1;
        }

        timeout--;
        //cpu_relax();
    }

    // pcie40_reset_fifo(bk_ptr->common);
    atomic_set(&bk_ptr->status, 0);

    return 1;
}

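/*
 * Map BAR0 and BAR4 with pci_iomap(); currently unused - the call in
 * altera_pci_probe() is commented out.
 */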
static int __init map_bars(struct pcie40_dma_state *bk_ptr, struct pci_dev *dev)
{
    struct pcie40_state *common;
    common = bk_ptr->common;
    if (common->bar_start[BAR0] && common->bar_size[BAR0]) {
        bk_ptr->bar[BAR0] = pci_iomap(dev, BAR0, common->bar_size[BAR0]);
        if (!bk_ptr->bar[BAR0]) {
            dev_err(&dev->dev, "could not map BAR[%d]", BAR0);
            return -1;
        } else
            dev_info(&dev->dev, "BAR[%d] mapped to 0x%p, length %lu", BAR0, bk_ptr->bar[BAR0], (long unsigned int)common->bar_size[BAR0]);
    }
    if (common->bar_start[BAR4] && common->bar_size[BAR4]) {
        bk_ptr->bar[BAR4] = pci_iomap(dev, BAR4, common->bar_size[BAR4]);
        if (!bk_ptr->bar[BAR4]) {
            dev_err(&dev->dev, "could not map BAR[%d]", BAR4);
            return -1;
        } else
            dev_info(&dev->dev, "BAR[%d] mapped to 0x%p, length %lu", BAR4, bk_ptr->bar[BAR4], (long unsigned int)common->bar_size[BAR4]);
    }
    return 0;
}

static void unmap_bars(struct pcie40_dma_state *bk_ptr, struct pci_dev *dev)
{
    int i;
    for (i = 0; i < P40_MAX_BAR; i++) {
        if (bk_ptr->bar[i]) {
            pci_iounmap(dev, bk_ptr->bar[i]);
            bk_ptr->bar[i] = NULL;
        }
    }
}
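/* Store/fetch the DMA state through the common pcie40 driver data attached to the pci_dev. */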
static void altera_pcie40_set_drvdata(struct pci_dev *pdev, struct pcie40_dma_state *state)
{
  struct pcie40_state *common = pci_get_drvdata(pdev);
  common->dma_state = state;
  common->dma_state->common = common;
}
static struct pcie40_dma_state *altera_pcie40_get_drvdata(struct pci_dev *pdev)
{
  struct pcie40_state *common = pci_get_drvdata(pdev);
  return common->dma_state;
}

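/*
 * Probe: create the character device, enable the PCI device and MSI, set the
 * 64-bit DMA mask, allocate the descriptor tables and host buffers, build the
 * write descriptors and program the descriptor controller in BAR0.  The MSI
 * interrupt handler is left disabled (see the commented-out request_irq()).
 */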
int  altera_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
    int rc = 0;
    int i;
    struct pcie40_dma_state *bk_ptr = NULL;
    struct pcie40_state *common;
    void * __iomem base_bar0;

    bk_ptr = kzalloc(sizeof(struct pcie40_dma_state), GFP_KERNEL);
    if(!bk_ptr)
        goto err_bk_alloc;

    bk_ptr->pci_dev = dev;
    altera_pcie40_set_drvdata(dev, bk_ptr);
    common = pci_get_drvdata(dev);
    bk_ptr->common = common;
  
    base_bar0 =common->bar0_regs;

    rc = init_chrdev(bk_ptr); 
    if (rc) {
        dev_err(&dev->dev, "init_chrdev() failed\n");
        goto err_initchrdev;
    }
    rc = pci_enable_device(dev);
    if (rc) {
        dev_err(&dev->dev, "pci_enable_device() failed\n");
        goto err_enable;
    } else {
        dev_info(&dev->dev, "pci_enable_device() successful");
    }
    //rc = pci_request_regions(dev, P40_DRV_NAME);
    rc = pci_request_selected_regions_exclusive(dev, DMA_BARS, P40_DRV_NAME);
    if (rc) {
        dev_err(&dev->dev, "pci_request_regions() failed\n");
        goto err_regions;
          }
    printk(P40_DIAG "pci request ok\n", P40_PARM);
    pci_set_master(dev);
    rc = pci_enable_msi(dev);
    if (rc) {
        dev_info(&dev->dev, "pci_enable_msi() failed\n");
        bk_ptr->msi_enabled = 0;
    } else {
        dev_info(&dev->dev, "pci_enable_msi() successful\n");
        bk_ptr->msi_enabled = 1;
    }
    pci_read_config_byte(dev, PCI_REVISION_ID, &bk_ptr->revision);
    pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &bk_ptr->irq_pin);
    pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &bk_ptr->irq_line);

    if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))) {
        pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
        dev_info(&dev->dev, "using a 64-bit DMA mask\n");
    } else {
        dev_info(&dev->dev, "unable to use 64-bit DMA mask\n");
        goto err_dma_mask;
    }

    dev_info(&dev->dev, "irq pin: %d\n", bk_ptr->irq_pin);
    dev_info(&dev->dev, "irq line: %d\n", bk_ptr->irq_line);
    dev_info(&dev->dev, "irq: %d\n", dev->irq);

   

    bk_ptr->irq_count=0;
    //  scan_bars(bk_ptr, dev);
    //map_bars(bk_ptr, dev);

    // waitqueue for user process
    init_waitqueue_head(&bk_ptr->wait_q);

    // set default settings to run
    bk_ptr->dma_status.altera_dma_num_dwords = ALTERA_DMA_NUM_DWORDS;
    bk_ptr->dma_status.altera_dma_descriptor_num = ALTERA_DMA_DESCRIPTOR_NUM;
    bk_ptr->dma_status.run_write = 1;
    bk_ptr->dma_status.run_read = 1;
    bk_ptr->dma_status.run_simul = 1;
    bk_ptr->dma_status.offset = 0;
    bk_ptr->dma_status.onchip = 1;
    bk_ptr->dma_status.rand = 0;
    bk_ptr->lite_table_rd_cpu_virt_addr = (struct lite_dma_desc_table *)pci_alloc_consistent(dev, sizeof(struct lite_dma_desc_table), &bk_ptr->lite_table_rd_bus_addr);

    if (!bk_ptr->lite_table_rd_cpu_virt_addr) {
        rc = -ENOMEM;
        goto err_rd_table;
    }
    bk_ptr->lite_table_wr_cpu_virt_addr = (struct lite_dma_desc_table *)pci_alloc_consistent(dev, sizeof(struct lite_dma_desc_table), &bk_ptr->lite_table_wr_bus_addr);

    if (!bk_ptr->lite_table_wr_cpu_virt_addr) {
        rc = -ENOMEM;
        goto err_wr_table;
    }
    bk_ptr->numpages = (PAGE_SIZE >= MAX_NUM_DWORDS*4) ? 1 : (int)((MAX_NUM_DWORDS*4)/PAGE_SIZE);
    bk_ptr->rp_rd_buffer_virt_addr = pci_alloc_consistent(dev, PAGE_SIZE*bk_ptr->numpages, &bk_ptr->rp_rd_buffer_bus_addr);
    if (!bk_ptr->rp_rd_buffer_virt_addr) {
        rc = -ENOMEM;
        goto err_rd_buffer;
    }
    bk_ptr->rp_wr_buffer_virt_addr = pci_alloc_consistent(dev, PAGE_SIZE*bk_ptr->numpages, &bk_ptr->rp_wr_buffer_bus_addr);
    printk(P40_DIAG "pci alloc size %lx\n", P40_PARM, PAGE_SIZE*bk_ptr->numpages);
    if (!bk_ptr->rp_wr_buffer_virt_addr) {
        printk(P40_DIAG "ERROR: out of memory allocating rp_wr_buffer\n", P40_PARM);
        rc = -ENOMEM;
        goto err_wr_buffer;
    }
    

    for (i = 0; i < ALTERA_DMA_DESCRIPTOR_NUM; i++) {
        if (i == 0)
            set_write_desc(&bk_ptr->lite_table_wr_cpu_virt_addr->descriptors[i], ALTERA_MEM_MAIN_BUF0_START, (dma_addr_t)(bk_ptr->rp_wr_buffer_bus_addr) + (i * 0x8000), DMASIZE, i);
        if (i == 1)
            set_write_desc(&bk_ptr->lite_table_wr_cpu_virt_addr->descriptors[i], ALTERA_MEM_MAIN_BUF1_START, (dma_addr_t)(bk_ptr->rp_wr_buffer_bus_addr) + (i * 0x8000), DMASIZE, i);
        if (i == 2)
            set_write_desc(&bk_ptr->lite_table_wr_cpu_virt_addr->descriptors[i], ALTERA_MEM_MAIN_BUF2_START, (dma_addr_t)(bk_ptr->rp_wr_buffer_bus_addr) + (i * 0x8000), DMASIZE, i);
        if (i == 3)
            set_write_desc(&bk_ptr->lite_table_wr_cpu_virt_addr->descriptors[i], ALTERA_MEM_MAIN_BUF3_START, (dma_addr_t)(bk_ptr->rp_wr_buffer_bus_addr) + (i * 0x8000), DMASIZE, i);
    }
    wmb();

    /* tell the descriptor controller where the descriptor table lives in host memory */
    iowrite32(((dma_addr_t)bk_ptr->lite_table_wr_bus_addr) >> 32, base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_RC_HIGH_SRC_ADDR);
    iowrite32((dma_addr_t)bk_ptr->lite_table_wr_bus_addr, base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_RC_LOW_SRC_ADDR);

    iowrite32(WR_CTRL_BUF_BASE_HI, base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_CTRL_HIGH_DEST_ADDR);
    iowrite32(WR_CTRL_BUF_BASE_LOW, base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_CTLR_LOW_DEST_ADDR);
    iowrite32(0, base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_CONTROL);  // do not enable MSI yet

    wmb();
    bk_ptr->dma_status.altera_dma_num_dwords = DMASIZE;
    iowrite32(bk_ptr->dma_status.altera_dma_descriptor_num - 1, base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_TABLE_SIZE);  // default value is 127
    iowrite32(0xFF, (base_bar0 + DESC_CTRLLER_BASE + ALTERA_LITE_DMA_WR_LAST_PTR));
    dev_info(&dev->dev, "DMA configuration done\n");
	/* rc=  request_irq(bk_ptr->irq_line, dma_isr, IRQF_SHARED, P40_DRV_NAME, (void *)bk_ptr); 
   
    if (rc) {
        dev_info(&dev->dev, "Could not request IRQ #%d", bk_ptr->irq_line);
        bk_ptr->irq_line = -1;
        goto err_irq;
    } else {
        dev_info(&dev->dev, "request irq: %d", bk_ptr->irq_line);
    }
 wmb();
    iowrite32 (1, base_bar0+DESC_CTRLLER_BASE+ALTERA_LITE_DMA_WR_CONTROL);// for MSI
   IRQ not working MTQ 	*/
    return 0;
    // error clean up
err_wr_buffer:
    dev_err(&dev->dev, "goto err_wr_buffer");
    pci_free_consistent(dev, PAGE_SIZE*bk_ptr->numpages, bk_ptr->rp_rd_buffer_virt_addr, bk_ptr->rp_rd_buffer_bus_addr);
err_rd_buffer:
    dev_err(&dev->dev, "goto err_rd_buffer");
    pci_free_consistent(dev, sizeof(struct lite_dma_desc_table), bk_ptr->lite_table_wr_cpu_virt_addr, bk_ptr->lite_table_wr_bus_addr);
err_wr_table:
    dev_err(&dev->dev, "goto err_wr_table");
    pci_free_consistent(dev, sizeof(struct lite_dma_desc_table), bk_ptr->lite_table_rd_cpu_virt_addr, bk_ptr->lite_table_rd_bus_addr);
err_rd_table:
    dev_err(&dev->dev, "goto err_rd_table");
    //   free_irq(bk_ptr->irq_line, (void *)bk_ptr);

    //err_irq:
    //unmap_bars(bk_ptr, dev);
    //dev_err(&dev->dev, "goto err_regions");
err_dma_mask:
    dev_err(&dev->dev, "goto err_dma_mask");
    pci_release_regions(dev);
err_regions:
    dev_err(&dev->dev, "goto err_regions");
    pci_disable_device(dev);
err_enable:
    dev_err(&dev->dev, "goto err_enable");
    unregister_chrdev_region (bk_ptr->cdevno, 1);
err_initchrdev:
    dev_err(&dev->dev, "goto err_initchrdev");
    kfree(bk_ptr);
err_bk_alloc:
    dev_err(&dev->dev, "goto err_bk_alloc");
    return rc;
}


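/*
 * Remove: tear down the character device, disable the PCI device and MSI,
 * release the regions and free the coherent descriptor tables and buffers.
 */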
void  altera_pci_remove(struct pci_dev *dev)
{
    struct pcie40_dma_state *bk_ptr = NULL;
    bk_ptr = altera_pcie40_get_drvdata(dev);
    
    device_destroy(pcie40_dma_class, MKDEV(MAJOR(bk_ptr->cdevno), MINOR(bk_ptr->cdevno)));
    cdev_del(&bk_ptr->cdev);
    unregister_chrdev_region(bk_ptr->cdevno, 1);
    pci_disable_device(dev);
    if(bk_ptr) {
        if(bk_ptr->msi_enabled) {
            pci_disable_msi(dev);
            bk_ptr->msi_enabled = 0;
        }
    }
    //unmap_bars(bk_ptr, dev);
    pci_release_regions(dev);
    /*  
    if (bk_ptr->irq_line >= 0) {
        printk(KERN_DEBUG "Freeing IRQ #%d", bk_ptr->irq_line);
        free_irq(bk_ptr->irq_line, (void *)bk_ptr);

    }
    */
     pci_free_consistent(dev, sizeof(struct lite_dma_desc_table), bk_ptr->lite_table_rd_cpu_virt_addr, bk_ptr->lite_table_rd_bus_addr);
    pci_free_consistent(dev, sizeof(struct lite_dma_desc_table), bk_ptr->lite_table_wr_cpu_virt_addr, bk_ptr->lite_table_wr_bus_addr);

  
    pci_free_consistent(dev, PAGE_SIZE*bk_ptr->numpages, bk_ptr->rp_rd_buffer_virt_addr, bk_ptr->rp_rd_buffer_bus_addr);
    pci_free_consistent(dev, PAGE_SIZE*bk_ptr->numpages, bk_ptr->rp_wr_buffer_virt_addr, bk_ptr->rp_wr_buffer_bus_addr);

    kfree(bk_ptr);
    printk(P40_INFO ": " "altera_dma_remove()," " " __DATE__ " " __TIME__ " " "\n");
}

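/* Module-level init: create the pcie40_dma device class used when registering the character device in init_chrdev(). */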
int  altera_dma_init(void)
{
    int rc = 0;

    printk(KERN_DEBUG P40_DRV_NAME ": " "altera_dma_init()," " " __DATE__ " " __TIME__ " " "\n");
   
  pcie40_dma_class = class_create(THIS_MODULE, PCIE40_DMA_CLASS);
  if (IS_ERR(pcie40_dma_class)) {
    rc = PTR_ERR(pcie40_dma_class);
    printk(P40_WARN "failed to register class, %d\n", P40_PARM, rc);
    goto err_class_create;
  }
  //pcie40_ecs_class->dev_uevent = pcie40_dev_uevent;
  pcie40_dma_class->devnode = pcie40_devnode;
  return rc;

err_class_create:
  return rc;

   
}

void altera_dma_exit(void)
{
  class_destroy(pcie40_dma_class);
  
}