Commit f116fb52 authored by Patrick Robbe

New structure

parent 39f841ac
//p40fpga``+
#define P40_FMT "P40DAQ:%s(): "
#define PCIE40_DAQ_CLASS "pcie40_daq"
#include <linux/pci.h>
#include <linux/types.h>
struct pcie40_dma_map;
static int dma_map_alloc(struct pci_dev *pci_dev, struct pcie40_dma_map* map, void __iomem *base, size_t max_entries, size_t desired_size);
static int dma_map_free(struct pci_dev *pci_dev, struct pcie40_dma_map* map);
#include "daq.h"
#include <linux/interrupt.h>
//+`EXPECTED_FPGA_VERSION`
#define EXPECTED_FPGA_VERSION (0x0400)//?>
static void pcie40_daq_set_drvdata(struct pci_dev *pci_dev, struct pcie40_daq_state *state)
{
struct pcie40_state *common = pci_get_drvdata(pci_dev);
common->daq_state = state;
common->daq_state->common = common;
}
static struct pcie40_daq_state *pcie40_daq_get_drvdata(struct pci_dev *pci_dev)
{
struct pcie40_state *common = pci_get_drvdata(pci_dev);
return common->daq_state;
}
//NOTE: select/epoll behaviour:
// - if MSI_MODE is DAQ:
// poll the _ctrl* file descriptor for CTRL_MSI_BLOCKS interrupts
// - if MSI_MODE is MAIN:
// poll the _main* file descriptor for MAIN_MSI_BYTES interrupts
// - if MSI_MODE is META:
// poll the _meta* file descriptor for META_MSI_BYTES interrupts
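//NOTE: a minimal userspace sketch of this polling scheme (assuming MSI_MODE is
// MAIN and a device path like /dev/pcie40_0_main; illustration only, not part
// of the driver):
/*
	#include <fcntl.h>
	#include <poll.h>

	int fd = open("/dev/pcie40_0_main", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	// blocks until pcie40_isr() wakes the shared wait queue, i.e. until the
	// FPGA has signalled (at least) MAIN_MSI_BYTES of new data via MSI
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		// new data is available in the main stream circular buffer
	}
*/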
//+`pcie_bus_detect_mps`
static void pcie_bus_detect_mps(struct pci_dev *pci_dev)//;?>
{
struct pci_dev *bridge = pci_dev->bus->self;
int mps, p_mps;
if (!bridge)
return;
mps = pcie_get_mps(pci_dev);
p_mps = pcie_get_mps(bridge);
if (mps != p_mps)
dev_warn(&pci_dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
mps, pci_name(bridge), p_mps);
else
dev_info(&pci_dev->dev, "Max Payload Size = %d\n", mps);
}
//+`dma_map_write_entry`
static void dma_map_write_entry(struct pcie40_dma_map* map, int i)//;?>
{
struct pcie40_dma_buffer* buffer = map->entries + i;
size_t pages = buffer->size >> PAGE_SHIFT;
iowrite32(buffer->start >> 32, map->base + i * 8 + 4);
iowrite32((buffer->start | pages) & 0xFFFFFFFF, map->base + i * 8);
wmb();
}
//+`dma_map_read_entry_base`
static inline dma_addr_t dma_map_read_entry_base(struct pcie40_dma_map* map, int i)//;?>
{
uint64_t lo, hi;
dma_addr_t base;
struct pcie40_dma_buffer* buffer = map->entries + i;
lo = ioread32(map->base + i * 8);
hi = ioread32(map->base + i * 8 + 4);
base = (hi << 32) | (lo & PAGE_MASK);
WARN_ON(base != buffer->start);
return base;
}
//+`dma_map_read_entry_size`
static inline size_t dma_map_read_entry_size(struct pcie40_dma_map* map, int i)//;?>
{
uint32_t lo;
size_t size;
struct pcie40_dma_buffer* buffer = map->entries + i;
lo = ioread32(map->base + i * 8);
size = (lo & ~PAGE_MASK) << PAGE_SHIFT; // the page count is packed into the low bits (see dma_map_write_entry)
WARN_ON(size != buffer->size);
return size;
}
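// A worked example of the entry packing used above: each 64-bit map entry is a
// page-aligned bus address OR'ed with the buffer's page count in the low
// PAGE_SHIFT bits. With 4 KiB pages, a 4 MiB buffer at bus address 0x100200000
// has pages = 0x400, so hi = 0x00000001 and lo = 0x00200400; the base is then
// recovered as lo & PAGE_MASK and the size as (lo & ~PAGE_MASK) << PAGE_SHIFT.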
//+`dma_map_alloc`
static int dma_map_alloc(struct pci_dev *pci_dev, struct pcie40_dma_map* map, void __iomem *base, size_t max_entries, size_t desired_size)//;?>
{
int i;
map->base = base;
map->max_entries = max_entries;
map->num_entries = 0;
map->size = 0;
map->entries = kzalloc(sizeof(struct pcie40_dma_buffer)*max_entries, GFP_KERNEL);
if (!map->entries)
return -ENOMEM;
printk(P40_DIAG "entries = 0x%p\n", P40_PARM, map->entries);
for (i = 0; i < map->max_entries; ++i) {
size_t alloc_size = desired_size - map->size > PCI_MAX_ALLOC
? PCI_MAX_ALLOC
: desired_size - map->size;
while (1) {
struct pcie40_dma_buffer* buffer = map->entries + i;
buffer->size = alloc_size;
buffer->ptr = pci_alloc_consistent(pci_dev, buffer->size, &buffer->start);
//printk("...%zu", buffer->size);
if (buffer->ptr == NULL) {
if (buffer->size < 1024*1024) {
printk("...NOT ENOUGH MEMORY!\n");
map->num_entries = i;
dma_map_free(pci_dev, map);
return -1;
}
alloc_size /= 2;
} else {
map->size += buffer->size;
break;
}
}
dma_map_write_entry(map, i);
if (map->size >= desired_size) {
++i;
break;
}
}
map->num_entries = i;
return 0;
}
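// A worked example of the fallback above: requesting 6 MiB with a 4 MiB
// PCI_MAX_ALLOC first allocates one 4 MiB buffer, then asks for the remaining
// 2 MiB; if that allocation fails the request is halved to 1 MiB, then to
// 512 KiB, and as soon as a failed attempt is already below the 1 MiB floor
// the whole map is freed and the function returns an error.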
//+`dma_map_free`
static int dma_map_free(struct pci_dev *pci_dev, struct pcie40_dma_map* map)//?>
{
int i;
for (i = 0; i < map->num_entries; ++i) {
struct pcie40_dma_buffer* buffer = map->entries + i;
pci_free_consistent(pci_dev, buffer->size, buffer->ptr, buffer->start);
}
kfree(map->entries);
map->entries = NULL;
return 0;
}
//+`pcie40_isr`
static irqreturn_t pcie40_isr(int irq, void *arg)//;?>
{
/* Since the FPGA can't get more than one MSI from the kernel, and since
MSI-X is a bit overkill for now, we use the same ISR and the same wait
queue; the ONLY thing we do here is wake up the wait queue. All timers
and similar bookkeeping can just go in the poll handler...
*/
// uint32_t write_off, read_off;
ktime_t now = ktime_get_real();
struct pcie40_daq_state *state = (struct pcie40_daq_state *)arg;
if (!state) {
return IRQ_NONE;
}
spin_lock_irqsave(&state->main_stream.off_lock, state->main_stream.off_flags);
dma_stream_get_write_off(&state->main_stream);
spin_unlock_irqrestore(&state->main_stream.off_lock, state->main_stream.off_flags);
spin_lock_irqsave(&state->meta_stream.off_lock, state->meta_stream.off_flags);
dma_stream_get_write_off(&state->meta_stream);
spin_unlock_irqrestore(&state->meta_stream.off_lock, state->meta_stream.off_flags);
wake_up_interruptible(&state->wait);
//if (irq == state->irq_line) {
state->main_stream.msi_delay = ktime_sub(now, state->main_stream.msi_last);
/*
write_off = ioread32(state->common->bar1_regs
+ state->main_stream.regs_base
+ P40_DMA_DAQ_STREAM_OFF_HOST_BUF_WRITE_OFF);
read_off = write_off < PAGE_SIZE
? state->main_stream.map.size - PAGE_SIZE
: write_off - PAGE_SIZE;
iowrite32(read_off, state->common->bar1_regs
+ state->main_stream.regs_base
+ P40_DMA_DAQ_STREAM_OFF_HOST_BUF_READ_OFF);
*/
state->main_stream.msi_last = now;
state->main_stream.msi_count++;/*
} else if (irq == state->irq_line+1) {
state->meta_stream.msi_delay = ktime_sub(now, state->meta_stream.msi_last);
write_off = ioread32(state->common->bar1_regs
+ state->meta_stream.regs_base
+ P40_DMA_DAQ_STREAM_OFF_HOST_BUF_WRITE_OFF);
read_off = write_off < PAGE_SIZE
? state->meta_stream.map.size - PAGE_SIZE
: write_off - PAGE_SIZE;
iowrite32(read_off, state->common->bar1_regs
+ state->meta_stream.regs_base
+ P40_STREAM_GET_HOST_BUF_READ_OFF);
state->meta_stream.msi_last = now;
state->meta_stream.msi_count++;
}*/
return IRQ_HANDLED;
#if 0
uint32_t write_off, read_off;
ktime_t now = ktime_get_real();
//getnstimeofday(&now);
//diff_timeval(&(state->daq_delay), &now, &(state->daq_msi));
state->daq_delay = ktime_sub(now, state->daq_last_msi);
write_off = ioread32(state->common->bar1_regs + P40_OFF_MEM_WRITE_OFF);
read_off = write_off < PAGE_SIZE
? state->memmap.size - PAGE_SIZE
: write_off - PAGE_SIZE;
rmb();
if (read_off == 0xFFFFFFFF) {
printk(P40_ERR "Read unvalid value at P40_OFF_MEM_READ_OFF", P40_PARM);
return IRQ_HANDLED;
}
//automatically advance read pointer (this is for testing only, in reality the event builder should advance the pointer)
//read_off = read_off ? 0 : (state->daq_mem_size / 2);
/*read_off += 2*1024*2024; //TODO: this is actually a parameter
if (read_off >= state->daq_mem_size) {
read_off = 0;
}*/
iowrite32(read_off, state->common->bar1_regs + P40_OFF_MEM_READ_OFF);
wmb();
//memcpy(&(state->daq_msi), &now, sizeof(state->daq_msi));
state->daq_last_msi = now;
state->irq_count++;
#endif
return IRQ_HANDLED;
}
//+`pcie40_daq_probe`
int pcie40_daq_probe(struct pci_dev *dev, const struct pci_device_id *id)//;?>
{
int rc = 0;
struct pcie40_state *common;
struct pcie40_daq_state *state = NULL;
uint32_t regmap_version, fpga_version;
common = pci_get_drvdata(dev);
state = kzalloc(sizeof(struct pcie40_daq_state), GFP_KERNEL);
if (!state) {
printk(P40_ERR "kzalloc()\n", P40_PARM);
rc = -ENOMEM;
goto err_kzalloc;
}
state->common = common;
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
init_waitqueue_head(&state->wait);
if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))) {
pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
printk(P40_INFO "using 64-bit DMA mask\n", P40_PARM);
} else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
printk(P40_INFO "using 32-bit DMA mask\n", P40_PARM);
} else {
printk(P40_WARN "unable to set 32-bit DMA mask\n", P40_PARM);
rc = -1; //TODO
goto err_pci_set_dma_mask;
}
pcie_bus_detect_mps(dev);
rc = alloc_chrdev_region(&(state->dev_num), P40_DAQ_CDEV_BASEMINOR, P40_DAQ_CDEV_COUNT, P40_DRV_NAME);
if (rc < 0) {
printk(P40_ERR "alloc_chrdev_region()\n", P40_PARM);
goto err_alloc_chrdev_region;
}
// Reset to default state
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_RESET, (1 << P40_RST_BIT_DEFAULT));
regmap_version = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_REGMAP);
printk(P40_INFO "Register map version: 0x%08X\n", P40_PARM, regmap_version);
if (regmap_version != P40_DMA_REGMAP_VERSION) {
printk(P40_ERR "Versions do not match (0x%08X expected)!\n", P40_PARM,
P40_DMA_REGMAP_VERSION);
rc = -1;
goto err_version;
}
fpga_version = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_VERSION);
printk(P40_INFO "FPGA core version: %X.%02X (%04X)\n", P40_PARM,
fpga_version >> 24, (fpga_version >> 16) & 0xFF, fpga_version & 0xFFFF);
if ((fpga_version >> 16) != EXPECTED_FPGA_VERSION) {
printk(P40_ERR "Version is not compatible with this driver!\n", P40_PARM);
rc = -1;
goto err_version;
}
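// For example, a fpga_version register value of 0x04001234 prints as
// "4.00 (1234)" and passes the check above, since 0x04001234 >> 16 == 0x0400.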
// CTRL endpoint
rc = pcie40_setup_cdev(pcie40_daq_class, &(state->ctrl_cdev), state->dev_num, CTRL_CDEV_MINOR, 1, CTRL_CDEV_NAME, state->common->dev_id, &ctrl_file_ops);
if (rc < 0) {
goto err_dev_ctrl;
}
// TODO: the streams should be configured only when the corresponding device is opened (so, if we're not using metadata or odin, that memory will not be allocated at all)
if (mainmibs < 0 || mainmibs > MAIN_BUF_MIBS_MAX) {
mainmibs = MAIN_BUF_MIBS_MAX;
}
// MAIN stream
state->main_stream.cdev_name = MAIN_CDEV_NAME;
state->main_stream.cdev_minor = MAIN_CDEV_MINOR;
state->main_stream.regs_base = P40_DMA_DAQ_MAIN_STREAM_QSYS_BASE;
state->main_stream.state = state;
rc = dma_stream_configure(state->common->dev_id, &state->main_stream,
P40_DMA_DAQ_MAIN_MAP_QSYS_BASE, MAIN_MAP_MAX_ENTRIES, mainmibs * 1024LL*1024LL);
if (rc < 0) {
goto err_main_configure;
}
if (metamibs < 0 || metamibs > META_BUF_MIBS_MAX) {
metamibs = META_BUF_MIBS_MAX;
}
// META stream
state->meta_stream.cdev_name = META_CDEV_NAME;
state->meta_stream.cdev_minor = META_CDEV_MINOR;
state->meta_stream.regs_base = P40_DMA_DAQ_META_STREAM_QSYS_BASE;
state->meta_stream.state = state;
rc = dma_stream_configure(state->common->dev_id, &state->meta_stream,
P40_DMA_DAQ_META_MAP_QSYS_BASE, META_MAP_MAX_ENTRIES, metamibs * 1024LL*1024LL);
if (rc < 0) {
goto err_meta_configure;
}
// Continue PCIe configuration
rc = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &(state->irq_pin));
if (rc) {
printk(P40_WARN "unable to read PCI_INTERRUPT_PIN\n", P40_PARM);
goto err_pci_interrupt_pin;
}
printk(P40_INFO "IRQ pin = %d\n", P40_PARM, state->irq_pin);
{
u8 irq_line = 0;
rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_line);
if (rc) {
printk(P40_WARN "unable to read PCI_INTERRUPT_LINE\n", P40_PARM);
goto err_pci_interrupt_line;
}
state->irq_line = irq_line;
}
printk(P40_INFO "IRQ line = %d\n", P40_PARM, state->irq_line);
//=Interrupt configuration
rc = pci_enable_msi(dev); //<
// A successful call allocates one interrupt to the device, regardless
// of how many MSIs the device supports. The device is switched from
// pin-based interrupt mode to MSI mode. The dev->irq number is changed
// to a new number which represents the message signaled interrupt;
// consequently, this function should be called before the driver calls
// request_irq(), because an MSI is delivered via a vector that is
//.different from the vector of a pin-based interrupt.
if (rc) {
//NOTE: from experience, you can try pci_enable_msi_range() to request more than one, but on all CPUs I looked at YOU WILL ONLY GET ONE; MSI-X is needed to get more!
printk(P40_WARN "could not enable MSI (errno %d)\n", P40_PARM, rc);
} else {
printk(P40_INFO "enabled MSI interrupting\n", P40_PARM);
}
printk(P40_INFO "dev->irq = %d\n", P40_PARM, dev->irq);
rc = request_irq(dev->irq, pcie40_isr, 0, P40_DRV_NAME, state);
if (rc) {
printk(P40_WARN "could not request IRQ #%d, error %d\n", P40_PARM, dev->irq, rc);
state->irq_line = -1;
goto err_request_irq;
}
state->irq_line = (int)dev->irq; // irq_line is an int, so MSI vectors above 255 are not truncated (truncation to u8 was the likely reason this value sometimes looked different in pcie40_remove)
//.For more information, please refer to https://www.kernel.org/doc/Documentation/PCI/MSI-HOWTO.txt
pcie40_daq_set_drvdata(dev, state);
return rc;
err_request_irq:
// request_irq() failed, so there is no IRQ to free here
if (dev->msi_enabled) {
pci_disable_msi(dev);
}
err_pci_interrupt_line:
err_pci_interrupt_pin:
dma_stream_destroy(state->common->dev_id, &state->meta_stream);
err_meta_configure:
dma_stream_destroy(state->common->dev_id, &state->main_stream);
err_main_configure:
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, CTRL_CDEV_NAME);
device_destroy(pcie40_daq_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+CTRL_CDEV_MINOR));
err_dev_ctrl:
err_version:
unregister_chrdev_region(state->dev_num, P40_DAQ_CDEV_COUNT);
err_alloc_chrdev_region:
err_pci_set_dma_mask:
kfree(state);
err_kzalloc:
return rc;
}
//+`pcie40_daq_remove`
void pcie40_daq_remove(struct pci_dev *dev)//;?>
{
struct pcie40_daq_state *state;
printk(P40_DIAG "pci_dev = 0x%p\n", P40_PARM, dev);
if (!dev)
return;
state = pcie40_daq_get_drvdata(dev);
if (!state) {
printk(P40_DIAG "remove(dev = 0x%p) dev->driver_data = 0x%p\n", P40_PARM, dev, state);
return;
}
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
if (state->common->pci_dev != dev) {
printk(P40_DIAG "dev->dev.driver_data->pci_dev (0x%08lx) != dev (0x%08lx)\n", P40_PARM,
(unsigned long)state->common->pci_dev, (unsigned long)dev);
}
printk(P40_DIAG "disabling main DMA stream\n", P40_PARM);
iowrite32(0, state->common->bar1_regs
+ P40_DMA_DAQ_MAIN_STREAM_QSYS_BASE + P40_DMA_DAQ_STREAM_OFF_ENABLE);
printk(P40_DIAG "disabling meta DMA stream\n", P40_PARM);
iowrite32(0, state->common->bar1_regs
+ P40_DMA_DAQ_META_STREAM_QSYS_BASE + P40_DMA_DAQ_STREAM_OFF_ENABLE);
if (dev->irq >= 0) {
printk(P40_INFO "freeing IRQ %d\n", P40_PARM, dev->irq);
free_irq(dev->irq, state);
}
if (dev->msi_enabled) {
printk(P40_INFO "disabling MSIs\n", P40_PARM);
pci_disable_msi(dev);
}
printk(P40_INFO "processed %d MSIs in main stream\n",
P40_PARM, state->main_stream.msi_count);
printk(P40_INFO "processed %d MSIs in meta stream\n",
P40_PARM, state->meta_stream.msi_count);
dma_stream_destroy(state->common->dev_id, &state->meta_stream);
dma_stream_destroy(state->common->dev_id, &state->main_stream);
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, CTRL_CDEV_NAME);
device_destroy(pcie40_daq_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+CTRL_CDEV_MINOR));
unregister_chrdev_region(state->dev_num, P40_DAQ_CDEV_COUNT);
kfree(state);
}
#ifndef __PCIE40_DRIVER_DAQ_H
#define __PCIE40_DRIVER_DAQ_H
//p40driver``+
#include "pcie40_driver_common.h"
#include <linux/fs.h>
#include <linux/mm_types.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include "pcie40_ioctl.h"
#define P40_DAQ_CDEV_BASEMINOR (2)
#define P40_DAQ_CDEV_COUNT (4)
#define MAIN_MAP_MAX_ENTRIES (1024) // This should be read from the FPGA
//ug`pcie40_driver.options`mainmibs *mainmibs* = _M_::
// Amount of memory (in MiBs) to allocate for each _main_ stream circular buffer. The driver will try to allocate this much memory but the final DMA buffer might be smaller in case of memory pressure from the DMA allocator. The default (and maximum) value is 4096.
#define MAIN_BUF_MIBS_MAX (4096)
static int mainmibs = MAIN_BUF_MIBS_MAX;
module_param(mainmibs, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mainmibs, "Desired size (MiB) of main stream circular buffer");
#define META_MAP_MAX_ENTRIES (32) // This should be read from the FPGA
//ug`pcie40_driver.options`metamibs *metamibs* = _M_::
// Amount of memory (in MiBs) to allocate for each _meta_ stream circular buffer. The default (and maximum) value is 128.
#define META_BUF_MIBS_MAX (128) // TODO: this could potentially be reduced to 64
static int metamibs = META_BUF_MIBS_MAX;
module_param(metamibs, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(metamibs, "Desired size (MiB) of meta stream circular buffer");
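// For example, a hypothetical invocation shrinking the main buffer while
// keeping the default meta buffer: modprobe pcie40 mainmibs=2048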
//ug`pcie40_driver.files`ctrl _ /dev/pcie40_?_ctrl::
// Device to access the DMA controller.
#define CTRL_CDEV_MINOR (P40_DAQ_CDEV_BASEMINOR + 0)
static const char CTRL_CDEV_NAME[] = "ctrl";
//ug`pcie40_driver.files`main _ /dev/pcie40_?_main::
// Device to access the _main_ DMA stream.
#define MAIN_CDEV_MINOR (P40_DAQ_CDEV_BASEMINOR + 1)
static const char MAIN_CDEV_NAME[] = "main";
//ug`pcie40_driver.files`meta _ /dev/pcie40_?_meta::
// Device to access the _meta_ DMA stream.
#define META_CDEV_MINOR (P40_DAQ_CDEV_BASEMINOR + 2)
static const char META_CDEV_NAME[] = "meta";
////ug`pcie40_driver.files`odin _ /dev/pcie40_?_odin::
//// Device to access the _odin_ DMA stream.
#define ODIN_CDEV_MINOR (P40_DAQ_CDEV_BASEMINOR + 3)
static const char ODIN_CDEV_NAME[] = "odin";
struct pcie40_dma_buffer {
void *ptr;
dma_addr_t start;
size_t size;
};
struct pcie40_dma_map {
void __iomem* base;
size_t max_entries;
size_t num_entries;
struct pcie40_dma_buffer *entries;
size_t size;
};
struct pcie40_daq_state;
//+`pcie40_dma_stream` Kernelspace representation of a DMA stream between the PCIe40 FPGA and upstream memory.
struct pcie40_dma_stream {
const char *cdev_name;
int8_t cdev_minor;
struct cdev cdev;
struct pcie40_dma_map map;
size_t regs_base;
uint32_t write_off;
uint32_t read_off;
int msi;
ktime_t msi_last;
ktime_t msi_delay;
int msi_count;
// This lock prevents the read pointer from being changed while we calculate how much data is available in the circular buffer (the write pointer can be advanced by the FPGA in the meantime, but that does not invalidate the result)
spinlock_t off_lock;
unsigned long off_flags;
struct pcie40_daq_state *state;
};
struct pcie40_daq_state {
struct pcie40_state *common;
int msi_base;
size_t msi_span;
wait_queue_head_t wait;
dev_t dev_num; //base MAJOR/MINOR numbers for device files
struct cdev ctrl_cdev;
struct pcie40_dma_stream main_stream;
struct pcie40_dma_stream meta_stream;
struct pcie40_dma_stream odin_stream;
int irq_line; // MSI vector copied from dev->irq; an int rather than a u8, since vectors can exceed 255
u8 irq_pin;
#ifdef PCIE40_EMU
struct task_struct *emu_thread;
#endif
};
static struct class *pcie40_daq_class = NULL;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
static int ctrl_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#else
static long ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#endif //+`ctrl_ioctl`
{
struct pcie40_daq_state *state = filp->private_data;
uint64_t __user *arg_ptr = (uint64_t __user *)arg;
int err = 0;
uint64_t value;
if (_IOC_DIR(cmd) & _IOC_READ)
err = !access_ok(VERIFY_WRITE, arg_ptr, _IOC_SIZE(cmd));
else if (_IOC_DIR(cmd) & _IOC_WRITE)
err = !access_ok(VERIFY_READ, arg_ptr, _IOC_SIZE(cmd));
if (err) return -EFAULT;
switch(cmd) {
case P40_CTRL_GET_RWTEST: //ioctl.pcie`P40_CTRL_GET_RWTEST`
// See regmap`pcie.dma_ctrl.rwtest` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_RWTEST);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_RWTEST: //ioctl.pcie`P40_CTRL_SET_RWTEST`
// See regmap`pcie.dma_ctrl.rwtest` .
__get_user(value, arg_ptr);
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_RWTEST, value);
break;
case P40_CTRL_GET_VERSION: //ioctl.pcie`P40_CTRL_GET_VERSION`
// See regmap`pcie.dma_ctrl.version` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_VERSION);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_LINK_ID: //ioctl.pcie`P40_CTRL_GET_LINK_ID`
// The value returned by this IOCTL encodes both the PCI topological address and the FPGA interface number, as follows:
// _ bits [31..24]::
// PCI bus number
// _ bits [23..16]::
// PCI slot number
// _ bits [15..8]::
// PCI function number
// _ bits [7..0]::
// FPGA-PCIe interface number within a given board (0 for the primary link and 1 for the secondary)
value = state->common->link_id;
if (state->common->pci_dev) {
value |= state->common->pci_dev->bus->number << 24;
value |= PCI_SLOT(state->common->pci_dev->devfn) << 16;
value |= PCI_FUNC(state->common->pci_dev->devfn) << 8;
}
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_CHIP_ID: //ioctl.pcie`P40_CTRL_GET_CHIP_ID`
// See regmap`pcie.dma_ctrl.chip_id` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHIP_ID_HI);
value <<= 32;
value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHIP_ID_LO);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_RESET: //ioctl.pcie`P40_CTRL_GET_RESET`
// See regmap`pcie.dma_ctrl.reset` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_RESET);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_RESET: //ioctl.pcie`P40_CTRL_SET_RESET`
// See regmap`pcie.dma_ctrl.reset` .
__get_user(value, arg_ptr);
if (value) {
state->main_stream.msi_count = 0;
state->meta_stream.msi_count = 0;
}
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_RESET, value);
break;
case P40_CTRL_GET_ERROR: //ioctl.pcie`P40_CTRL_GET_ERROR`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_ERROR);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_MAIN_GEN_CTL: //ioctl.pcie`P40_CTRL_GET_MAIN_GEN_CTL`
// See regmap`pcie.dma_ctrl.main_gen_ctl` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_CTL);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_MAIN_GEN_CTL: //ioctl.pcie`P40_CTRL_SET_MAIN_GEN_CTL`
// See regmap`pcie.dma_ctrl.main_gen_ctl` .
__get_user(value, arg_ptr);
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_CTL, value);
break;
case P40_CTRL_GET_MAIN_GEN_FIXED: //ioctl.pcie`P40_CTRL_GET_MAIN_GEN_FIXED`
// See regmap`pcie.dma_ctrl.main_gen_fixed` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_FIXED);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_MAIN_GEN_FIXED: //ioctl.pcie`P40_CTRL_SET_MAIN_GEN_FIXED`
// See regmap`pcie.dma_ctrl.main_gen_fixed` .
__get_user(value, arg_ptr);
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_FIXED, value);
break;
case P40_CTRL_GET_MAIN_RAW_MODE: //ioctl.pcie`P40_CTRL_GET_MAIN_RAW_MODE`
// See regmap`pcie.dma_ctrl.main_raw_mode` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_RAW_MODE);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_META_PACKING: //ioctl.pcie`P40_CTRL_GET_META_PACKING`
// See regmap`pcie.dma_ctrl.meta_packing` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_PACKING);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_META_PACKING: //ioctl.pcie`P40_CTRL_SET_META_PACKING`
__get_user(value, arg_ptr);
// See regmap`pcie.dma_ctrl.meta_packing` .
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_META_PACKING, value);
break;
case P40_CTRL_GET_PCIE_GEN: //ioctl.pcie`P40_CTRL_GET_PCIE_GEN`
// See regmap`pcie.dma_ctrl.pcie_gen` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_PCIE_GEN);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_MSI_MODE: //ioctl.pcie`P40_CTRL_GET_MSI_MODE`
// See regmap`pcie.dma_ctrl.msi_mode` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MSI_MODE);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_MSI_MODE: //ioctl.pcie`P40_CTRL_SET_MSI_MODE`
// See regmap`pcie.dma_ctrl.msi_mode` .
__get_user(value, arg_ptr);
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MSI_MODE, value);
break;
case P40_CTRL_GET_MAIN_MSI_BYTES: //ioctl.pcie`P40_CTRL_GET_MAIN_MSI_BYTES`
// See regmap`pcie.dma_ctrl.main_msi_bytes` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_MSI_BYTES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_MAIN_MSI_BYTES: //ioctl.pcie`P40_CTRL_SET_MAIN_MSI_BYTES`
__get_user(value, arg_ptr);
// See regmap`pcie.dma_ctrl.main_msi_bytes` .
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_MSI_BYTES, value);
break;
case P40_CTRL_GET_MAIN_MSI_CYCLES: //ioctl.pcie`P40_CTRL_GET_MAIN_MSI_CYCLES`
// See regmap`pcie.dma_ctrl.main_msi_cycles` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_MSI_CYCLES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_META_MSI_BYTES: //ioctl.pcie`P40_CTRL_GET_META_MSI_BYTES`
// See regmap`pcie.dma_ctrl.meta_msi_bytes` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_BYTES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_META_MSI_BYTES: //ioctl.pcie`P40_CTRL_SET_META_MSI_BYTES`
// See regmap`pcie.dma_ctrl.meta_msi_bytes` .
__get_user(value, arg_ptr);
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_BYTES, value);
break;
case P40_CTRL_GET_META_MSI_CYCLES: //ioctl.pcie`P40_CTRL_GET_META_MSI_CYCLES`
// See regmap`pcie.dma_ctrl.meta_msi_cycles` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_CYCLES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_META_MSI_BLOCKS: //ioctl.pcie`P40_CTRL_GET_META_MSI_BLOCKS`
// See regmap`pcie.dma_ctrl.meta_msi_blocks` .
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_BLOCKS);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_META_MSI_BLOCKS: //ioctl.pcie`P40_CTRL_SET_META_MSI_BLOCKS`
// See regmap`pcie.dma_ctrl.meta_msi_blocks` .
__get_user(value, arg_ptr);
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_BLOCKS, value);
break;
case P40_CTRL_GET_HOST_MAIN_MSI_NSECS: //ioctl.pcie`P40_CTRL_GET_HOST_MAIN_MSI_NSECS`
value = ktime_to_ns(state->main_stream.msi_delay);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_HOST_META_MSI_NSECS: //ioctl.pcie`P40_CTRL_GET_HOST_META_MSI_NSECS`
value = ktime_to_ns(state->meta_stream.msi_delay);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_INBUF_SIZE: //ioctl.pcie`P40_CTRL_GET_INBUF_SIZE`
// See regmap`pcie.dma_ctrl.inbuf_size`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_INBUF_SIZE);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_INBUF_FILL: //ioctl.pcie`P40_CTRL_GET_INBUF_FILL`
// See regmap`pcie.dma_ctrl.inbuf_fill`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_INBUF_FILL);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_TRUNC_THRES: //ioctl.pcie`P40_CTRL_GET_TRUNC_THRES`
// See regmap`pcie.dma_ctrl.trunc_thres`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_THRES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_SET_TRUNC_THRES: //ioctl.pcie`P40_CTRL_SET_TRUNC_THRES`
// See regmap`pcie.dma_ctrl.trunc_thres`
__get_user(value, arg_ptr);
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_THRES, value);
break;
case P40_CTRL_GET_TRUNC_TOTAL_CYCLES: //ioctl.pcie`P40_CTRL_GET_TRUNC_TOTAL_CYCLES`
// See regmap`pcie.dma_ctrl.trunc_total_cycles`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_TOTAL_CYCLES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_TRUNC_TOTAL_SINCE_EVID: //ioctl.pcie`P40_CTRL_GET_TRUNC_TOTAL_SINCE_EVID`
// See regmap`pcie.dma_ctrl.trunc_total_since_evid`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_TOTAL_SINCE_EVID_HI);
value <<= 32;
value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_TOTAL_SINCE_EVID_LO);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_TRUNC_LAST_CYCLES: //ioctl.pcie`P40_CTRL_GET_TRUNC_LAST_CYCLES`
// See regmap`pcie.dma_ctrl.trunc_last_cycles`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_CYCLES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_TRUNC_LAST_FROM_EVID: //ioctl.pcie`P40_CTRL_GET_TRUNC_LAST_FROM_EVID`
// See regmap`pcie.dma_ctrl.trunc_last_from_evid`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_FROM_EVID_HI);
value <<= 32;
value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_FROM_EVID_LO);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_TRUNC_LAST_TO_EVID: //ioctl.pcie`P40_CTRL_GET_TRUNC_LAST_TO_EVID`
// See regmap`pcie.dma_ctrl.trunc_last_to_evid`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_TO_EVID_HI);
value <<= 32;
value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_TO_EVID_LO);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_CHOKE_TOTAL_CYCLES: //ioctl.pcie`P40_CTRL_GET_CHOKE_TOTAL_CYCLES`
// See regmap`pcie.dma_ctrl.choke_total_cycles`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_TOTAL_CYCLES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_CHOKE_TOTAL_SINCE_EVID: //ioctl.pcie`P40_CTRL_GET_CHOKE_TOTAL_SINCE_EVID`
// See regmap`pcie.dma_ctrl.choke_total_since_evid`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_TOTAL_SINCE_EVID_HI);
value <<= 32;
value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_TOTAL_SINCE_EVID_LO);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_CHOKE_LAST_CYCLES: //ioctl.pcie`P40_CTRL_GET_CHOKE_LAST_CYCLES`
// See regmap`pcie.dma_ctrl.choke_last_cycles`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_CYCLES);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_CHOKE_LAST_FROM_EVID: //ioctl.pcie`P40_CTRL_GET_CHOKE_LAST_FROM_EVID`
// See regmap`pcie.dma_ctrl.choke_last_from_evid`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_FROM_EVID_HI);
value <<= 32;
value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_FROM_EVID_LO);
__put_user(value, arg_ptr);
break;
case P40_CTRL_GET_CHOKE_LAST_TO_EVID: //ioctl.pcie`P40_CTRL_GET_CHOKE_LAST_TO_EVID`
// See regmap`pcie.dma_ctrl.choke_last_to_evid`
value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_TO_EVID_HI);
value <<= 32;
value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_TO_EVID_LO);
__put_user(value, arg_ptr);
break;
/* unfortunately this is only possible in linux 4.x
case P40_CTL_GET_GEN:
switch (state->common->pci_dev->bus->cur_bus_speed) {
case PCIE_SPEED_2_5GT:
value = 1;
break;
case PCIE_SPEED_5_0GT:
value = 2;
break;
case PCIE_SPEED_8_0GT:
value = 3;
break;
default:
value = 0;
break;
}
__put_user(value, arg_ptr);
break;
*/
default:
printk(P40_ERR "invalid ioctl\n", P40_PARM);
return -EINVAL;
}
return 0;
}
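// A userspace sketch decoding the P40_CTRL_GET_LINK_ID value documented above
// (fd is assumed to be an open _ctrl device file; illustration only):
/*
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	uint64_t id;
	if (ioctl(fd, P40_CTRL_GET_LINK_ID, &id) == 0)
		printf("bus %u slot %u func %u link %u\n",
		       (unsigned)(id >> 24) & 0xFF, (unsigned)(id >> 16) & 0xFF,
		       (unsigned)(id >> 8) & 0xFF, (unsigned)id & 0xFF);
*/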
static unsigned int ctrl_poll(struct file *file, poll_table *wait)
{
// Make sure device is in MSI_DAQ mode
// (Do we really want to do this at every poll()? Well it's just a few ns,
// and it's still better than doing it in the isr)
printk(P40_DIAG, P40_PARM);
return 0;
}
//+`ctrl_open`
static int ctrl_open(struct inode *inode, struct file *filp)//;?>
{
struct pcie40_daq_state *state = container_of(inode->i_cdev, struct pcie40_daq_state, ctrl_cdev);
//printk(P40_INFO, P40_PARM);
filp->private_data = state;
return pcie40_device_accessible(state->common) ? 0 : -EIO;
}
//+`ctrl_release`
static int ctrl_release(struct inode *inode, struct file *filp)//;?>
{
struct pcie40_daq_state *state = container_of(inode->i_cdev, struct pcie40_daq_state, ctrl_cdev);
//printk(P40_INFO, P40_PARM);
if (filp->private_data != state) {
printk(P40_DIAG "inconsistent private_data\n", P40_PARM);
return -EINVAL;
}
filp->private_data = NULL;
return 0;
}
static struct file_operations ctrl_file_ops = {
.owner = THIS_MODULE,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
.ioctl = ctrl_ioctl,
#else
.unlocked_ioctl = ctrl_ioctl,
#endif
.open = ctrl_open,
.release = ctrl_release,
.poll = ctrl_poll,
};
static int dma_map_mmap(struct pcie40_dma_map* map, struct vm_area_struct *vma);
static int dma_map_munmap(struct pcie40_dma_map* map);
static void dma_map_print(struct pcie40_dma_map* map);
static uint32_t dma_stream_get_read_off(struct pcie40_dma_stream *stream);
static void dma_stream_set_read_off(struct pcie40_dma_stream *stream, uint32_t read_off);
static uint32_t dma_stream_get_write_off(struct pcie40_dma_stream *stream);
static inline size_t dma_stream_get_bytes_used(struct pcie40_dma_stream *stream)
{
uint64_t host_buf_bytes = stream->map.size;
uint64_t host_write_off, host_read_off;
spin_lock_irqsave(&stream->off_lock, stream->off_flags);
host_write_off = dma_stream_get_write_off(stream); //TODO: use cached value
host_read_off = dma_stream_get_read_off(stream); //TODO: use cached value
spin_unlock_irqrestore(&stream->off_lock, stream->off_flags);
if (host_write_off >= host_read_off) {
return host_write_off - host_read_off;
} else {
return host_buf_bytes - (host_read_off - host_write_off);
}
}
static inline size_t dma_stream_get_bytes_free(struct pcie40_dma_stream *stream)
{
return stream->map.size - dma_stream_get_bytes_used(stream);
}
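// A worked example of the occupancy arithmetic above: with a 16 MiB host
// buffer, host_write_off = 0x100000 and host_read_off = 0xF00000 give
// used = 16 MiB - (0xF00000 - 0x100000) = 2 MiB: the writer has wrapped past
// the end of the circular buffer while the reader has not yet.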
static inline uint32_t pcie40_read32_stream(struct pcie40_dma_stream *stream, unsigned long offset);
static inline void pcie40_write32_stream(struct pcie40_dma_stream *stream, unsigned long offset, uint32_t value);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
static int dma_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#else
static long dma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#endif
//+`dma_ioctl`
{
struct pcie40_dma_stream *stream = filp->private_data;
uint64_t __user *arg_ptr = (uint64_t __user *)arg;
int err = 0;
uint64_t value;
if (_IOC_DIR(cmd) & _IOC_READ)
err = !access_ok(VERIFY_WRITE, arg_ptr, _IOC_SIZE(cmd));
else if (_IOC_DIR(cmd) & _IOC_WRITE)
err = !access_ok(VERIFY_READ, arg_ptr, _IOC_SIZE(cmd));
if (err) return -EFAULT;
switch(cmd) {
case P40_STREAM_GET_ENABLE: //ioctl.pcie`P40_STREAM_GET_ENABLE`
// See regmap`pcie.dma_daq_stream.enable`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE);
__put_user(value, arg_ptr);
break;
case P40_STREAM_SET_ENABLE: //ioctl.pcie`P40_STREAM_SET_ENABLE`
// See regmap`pcie.dma_daq_stream.enable`
__get_user(value, arg_ptr);
pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE, value);
break;
case P40_STREAM_GET_READY: //ioctl.pcie`P40_STREAM_GET_READY`
// See regmap`pcie.dma_daq_stream.ready`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_READY);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_FLUSH: //ioctl.pcie`P40_STREAM_GET_FLUSH`
// See regmap`pcie.dma_daq_stream.flush`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FLUSH);
__put_user(value, arg_ptr);
break;
case P40_STREAM_SET_FLUSH: //ioctl.pcie`P40_STREAM_SET_FLUSH`
// See regmap`pcie.dma_daq_stream.flush`
__get_user(value, arg_ptr);
pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FLUSH, value);
break;
case P40_STREAM_GET_FPGA_BUF_BYTES: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_BYTES`
// See regmap`pcie.dma_daq_stream.fpga_buf_bytes`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_BYTES);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_FPGA_BUF_DESCS: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESCS`
// See regmap`pcie.dma_daq_stream.fpga_buf_descs`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_FPGA_BUF_DESC_BYTES: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESC_BYTES`
// See regmap`pcie.dma_daq_stream.fpga_buf_desc_bytes`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESC_BYTES);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_FPGA_BUF_DESCS_FILL: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESCS_FILL`
// See regmap`pcie.dma_daq_stream.fpga_buf_descs_fill`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS_FILL_HI);
value <<= 32;
value |= pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS_FILL_LO);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_FPGA_BUF_DESC_FILL_BYTES: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESC_FILL_BYTES`
// See regmap`pcie.dma_daq_stream.fpga_buf_desc_fill_bytes`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESC_FILL_BYTES);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_FPGA_BUF_DESCS_BUSY: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESCS_BUSY`
// See regmap`pcie.dma_daq_stream.fpga_buf_descs_busy`
value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS_BUSY_HI);
value <<= 32;
value |= pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS_BUSY_LO);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_HOST_BUF_WRITE_OFF: //ioctl.pcie`P40_STREAM_GET_HOST_BUF_WRITE_OFF`
value = dma_stream_get_write_off(stream);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_HOST_BUF_READ_OFF: //ioctl.pcie`P40_STREAM_GET_HOST_BUF_READ_OFF`
value = dma_stream_get_read_off(stream);
__put_user(value, arg_ptr);
break;
/*
case P40_STREAM_SET_HOST_BUF_READ_OFF:
__get_user(value, arg_ptr);
iowrite32(value & 0xFFFFFFFF, stream->state->common->bar1_regs
+ stream->regs_base + P40_DMA_DAQ_STREAM_OFF_HOST_BUF_READ_OFF);
break;
*/
/*case P40_STREAM_GET_HOST_MAP_ENTRIES:
value = ioread32(stream->state->common->bar1_regs
+ stream->regs_base + P40_DMA_DAQ_STREAM_OFF_HOST_MAP_ENTRIES);
__put_user(value, arg_ptr);
break;
case P40_STREAM_SET_HOST_MAP_ENTRIES:
__get_user(value, arg_ptr);
iowrite32(value & 0xFFFFFFFF, stream->state->common->bar1_regs
+ stream->regs_base + P40_DMA_DAQ_STREAM_OFF_HOST_MAP_ENTRIES);
break;*/
case P40_STREAM_GET_HOST_BUF_BYTES: //ioctl.pcie`P40_STREAM_GET_HOST_BUF_BYTES`
value = stream->map.size;
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_HOST_BUF_BYTES_USED: //ioctl.pcie`P40_STREAM_GET_HOST_BUF_BYTES_USED`
value = dma_stream_get_bytes_used(stream);
__put_user(value, arg_ptr);
break;
case P40_STREAM_GET_HOST_MSI_COUNT: //ioctl.pcie`P40_STREAM_GET_HOST_MSI_COUNT`
value = stream->msi_count;
__put_user(value, arg_ptr);
break;
case P40_STREAM_FREE_HOST_BUF_BYTES: //ioctl.pcie`P40_STREAM_FREE_HOST_BUF_BYTES`
{
uint64_t host_buf_bytes = stream->map.size;
uint64_t host_write_off, host_read_off;
__get_user(value, arg_ptr);
spin_lock_irqsave(&stream->off_lock, stream->off_flags);
host_write_off = dma_stream_get_write_off(stream); //TODO: use cached value
host_read_off = dma_stream_get_read_off(stream); //TODO: use cached value
if (host_write_off >= host_read_off) {
value = min(value, host_write_off - host_read_off);
} else {
value = min(value, host_buf_bytes - (host_read_off - host_write_off));
}
dma_stream_set_read_off(stream, (host_read_off + value) % host_buf_bytes);
spin_unlock_irqrestore(&stream->off_lock, stream->off_flags);
__put_user(value, arg_ptr);
break;
}
default:
printk(P40_ERR "invalid ioctl\n", P40_PARM);
return -EINVAL;
}
return 0;
}
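// A minimal sketch of the consumer protocol these ioctls support: poll for
// data, query the fill level, consume, then hand the bytes back to the FPGA.
// fd/buf are assumed to be an open and mmap()ed _main device and process() is
// a hypothetical consumer; a real reader would also track its own read offset
// into the circular buffer (illustration only):
/*
	uint64_t used;
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ioctl(fd, P40_STREAM_GET_HOST_BUF_BYTES_USED, &used);
		process(buf, used); // consume 'used' bytes starting at the read offset
		ioctl(fd, P40_STREAM_FREE_HOST_BUF_BYTES, &used);
		// on return 'used' holds the number of bytes actually released
	}
*/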
//+`dma_mmap`
static int dma_mmap(struct file* filp, struct vm_area_struct* vma)//;?>
{
struct pcie40_dma_stream *stream = filp->private_data;
int ret;
dma_map_print(&stream->map);
ret = dma_map_mmap(&stream->map, vma);
vma->vm_private_data = stream;
return ret;
}
//+`dma_poll`
static unsigned int dma_poll(struct file *filp, poll_table *wait)//;?>
{
struct pcie40_dma_stream *stream = filp->private_data;
uint32_t host_write_off, host_read_off;
spin_lock_irqsave(&stream->off_lock, stream->off_flags);
host_write_off = dma_stream_get_write_off(stream);
host_read_off = stream->read_off;
spin_unlock_irqrestore(&stream->off_lock, stream->off_flags);
//printk(P40_INFO, P40_PARM);
poll_wait(filp, &stream->state->wait, wait);
if (host_write_off != host_read_off)
return POLLIN | POLLRDNORM;
//if (dma-stopped)
// return POLLIN | POLLRDNORM; //POLLHUP;
if (!pcie40_device_accessible(stream->state->common))
return POLLERR;
return 0;
}
//+`dma_open`
static int dma_open(struct inode *inode, struct file *filp)//;?>
{
struct pcie40_dma_stream *stream = container_of(inode->i_cdev, struct pcie40_dma_stream, cdev);
//printk(P40_INFO, P40_PARM);
filp->private_data = stream;
return 0;
}
//+`dma_release`
static int dma_release(struct inode *inode, struct file *filp)//;?>
{
struct pcie40_dma_stream *stream = container_of(inode->i_cdev, struct pcie40_dma_stream, cdev);
//printk(P40_INFO, P40_PARM);
if (filp->private_data != stream) {
printk(P40_DIAG "inconsistent private_data\n", P40_PARM);
return -EINVAL;
}
filp->private_data = NULL;
return 0;
}
static struct file_operations dma_file_ops = {
.owner = THIS_MODULE,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
.ioctl = dma_ioctl,
#else
.unlocked_ioctl = dma_ioctl,
#endif
.mmap = dma_mmap,
.poll = dma_poll,
.open = dma_open,
.release = dma_release,
};
static inline uint32_t pcie40_read32_stream(struct pcie40_dma_stream *stream, unsigned long offset)
{
#ifndef PCIE40_EMU
return ioread32(stream->state->common->bar1_regs + stream->regs_base + offset);
#else
return *(uint32_t *)(stream->state->common->bar1_regs + stream->regs_base + offset);
#endif
}
static inline void pcie40_write32_stream(struct pcie40_dma_stream *stream, unsigned long offset, uint32_t value)
{
#ifndef PCIE40_EMU
iowrite32(value, stream->state->common->bar1_regs + stream->regs_base + offset);
#else
*(uint32_t *)(stream->state->common->bar1_regs + stream->regs_base + offset) = value;
#endif
}
//+`dma_map_mmap`
static int dma_map_mmap(struct pcie40_dma_map* map, struct vm_area_struct *vma)//?>
{
int rc;
int i = 0;
size_t mapped_bytes = 0;
unsigned long mapped_end = vma->vm_start;
printk(P40_INFO "VIRT=%lx PHYS=0x%pad SIZE=%lu\n", P40_PARM,
vma->vm_start, &(map->entries[0].start), vma->vm_end - vma->vm_start);
//TODO: make this mapping read only
if (map->num_entries > 0 && map->num_entries <= map->max_entries) { // guard against an empty map (modulo by zero below)
while (mapped_end < vma->vm_end) {
struct pcie40_dma_buffer *buffer = map->entries + i;
rc = remap_pfn_range(vma, mapped_end, buffer->start >> PAGE_SHIFT,
buffer->size, vma->vm_page_prot);
mapped_bytes += buffer->size;
mapped_end += buffer->size;
if (rc) {
printk(P40_DIAG "remap_pfn_range() failed\n", P40_PARM);
//TODO: undo all previous mappings before returning
return rc;
}
i = (i+1) % map->num_entries;
}
}
printk(P40_INFO "mapped %zu bytes\n", P40_PARM, mapped_bytes);
return 0;
}
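// Note that the loop above walks the map entries modulo num_entries, so
// userspace may map more than the buffer size (typically twice) and read
// records that wrap around the end of the circular buffer through a single
// contiguous virtual range. A sketch of such a double mapping (illustration
// only):
/*
	uint64_t buf_bytes;
	ioctl(fd, P40_STREAM_GET_HOST_BUF_BYTES, &buf_bytes);
	void *buf = mmap(NULL, 2 * buf_bytes, PROT_READ, MAP_SHARED, fd, 0);
*/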
static int dma_map_munmap(struct pcie40_dma_map* map)
{
return 0;
}
static void dma_map_print(struct pcie40_dma_map* map)
{
printk(P40_DIAG "memory map: %zu entries\n", P40_PARM, map->num_entries);/*
int i;
for (i = 0; i < map->num_entries; ++i) {
struct pcie40_dma_buffer* buffer = map->entries + i;
printk(P40_DIAG " [%3d] base: 0x%pad size: %zu bytes\n", P40_PARM, i,
(void *)dma_map_read_entry_base(map, i),
dma_map_read_entry_size(map, i)
);
}*/
}
static uint32_t dma_stream_get_read_off(struct pcie40_dma_stream *stream)
{
// See regmap`pcie.dma_daq_stream.host_buf_read_off`
//TODO: spin lock to serialize access!
return (stream->read_off = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_BUF_READ_OFF));
}
static void dma_stream_set_read_off(struct pcie40_dma_stream *stream, uint32_t read_off)
{
// See regmap`pcie.dma_daq_stream.host_buf_read_off`
//TODO: spin lock to serialize access!
stream->read_off = read_off;
pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_BUF_READ_OFF, read_off);
}
static uint32_t dma_stream_get_write_off(struct pcie40_dma_stream *stream)
{
// See regmap`pcie.dma_daq_stream.host_buf_write_off`
//TODO: spin lock to serialize access!
return (stream->write_off = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_BUF_WRITE_OFF));
}
//+`dma_stream_configure`
static int dma_stream_configure(int dev_id, struct pcie40_dma_stream *stream, size_t map_base, size_t map_max_entries, size_t buf_desired_bytes)//;?>
{
int rc;
uint32_t enable;
stream->msi_count = 0;
spin_lock_init(&stream->off_lock);
enable = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE);
pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE, 0);
rc = pcie40_setup_cdev(pcie40_daq_class, &stream->cdev, stream->state->dev_num, stream->cdev_minor, 1, stream->cdev_name, dev_id, &dma_file_ops);
if (rc < 0) {
goto err_setup_cdev;
}
printk(P40_INFO "allocating memory (%lu bytes)", P40_PARM, buf_desired_bytes);
#ifndef PCIE40_EMU
if (dma_map_alloc(stream->state->common->pci_dev, &stream->map, stream->state->common->bar1_regs + map_base, map_max_entries, buf_desired_bytes) < 0) {
goto err_map_alloc;
}
#else
if (dma_map_emu_alloc(&stream->map, stream->state->common->bar1_regs + map_base, map_max_entries, buf_desired_bytes) < 0) {
goto err_map_alloc;
}
#endif
pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_MAP_ENTRIES, stream->map.num_entries);
pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE, enable);
dma_map_print(&stream->map);
return 0;
err_map_alloc:
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, dev_id, stream->cdev_name);
device_destroy(pcie40_daq_class, MKDEV(MAJOR(stream->state->dev_num), MINOR(stream->state->dev_num)+stream->cdev_minor));
err_setup_cdev:
return -1;
}
//+`dma_stream_destroy`
static void dma_stream_destroy(int dev_id, struct pcie40_dma_stream *stream)//;?>
{
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, dev_id, stream->cdev_name);
device_destroy(pcie40_daq_class,
MKDEV(MAJOR(stream->state->dev_num),
MINOR(stream->state->dev_num)+stream->cdev_minor));
printk(P40_INFO "free DMA buffer(s)\n", P40_PARM);
#ifndef PCIE40_EMU
dma_map_free(stream->state->common->pci_dev, &stream->map);
#else
dma_map_emu_free(&stream->map);
#endif
pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_MAP_ENTRIES, 0);
}
//+`pcie40_daq_init`
int pcie40_daq_init(void)//;?>
{
int rc = 0;
pcie40_daq_class = class_create(THIS_MODULE, PCIE40_DAQ_CLASS);
if (IS_ERR(pcie40_daq_class)) {
rc = PTR_ERR(pcie40_daq_class);
printk(P40_WARN "failed to register class, %d\n", P40_PARM, rc);
goto err_class_create;
}
//pcie40_daq_class->dev_uevent = pcie40_dev_uevent;
pcie40_daq_class->devnode = pcie40_devnode;
err_class_create:
return rc;
}
//+`pcie40_daq_exit`
void pcie40_daq_exit(void)//;?>
{
class_destroy(pcie40_daq_class);
}
#endif//__PCIE40_DRIVER_DAQ_H
//p40fpga``+
#define P40_FMT "P40ECS:%s(): "
#define PCIE40_ECS_CLASS "pcie40_ecs"
#include "ecs.h"
#include "pcie40_ioctl.h"
//dg`wt.pcie40.fpga.ecs` The ECS submodule implements the system calls behind the BAR0 and BAR2 device files. The submodule is initialized and uninitialized at the same time as the main module, through the p40driver`pcie40_ecs_init` and p40driver`pcie40_ecs_exit` functions. Likewise the ECS-specific probing logic is encapsulated in p40fpga`pcie40_ecs_probe` and p40fpga`pcie40_ecs_remove` .
static void pcie40_ecs_set_drvdata(struct pci_dev *pdev, struct pcie40_ecs_state *state)
{
struct pcie40_state *common = pci_get_drvdata(pdev);
common->ecs_state = state;
common->ecs_state->common = common;
}
static struct pcie40_ecs_state *pcie40_ecs_get_drvdata(struct pci_dev *pdev)
{
struct pcie40_state *common = pci_get_drvdata(pdev);
return common->ecs_state;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
static int ecs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#else
static long ecs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#endif//+`ecs_ioctl`
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
struct inode *inode = filp->f_inode;
#endif
struct pcie40_ecs_state *state = filp->private_data;
int bar;
switch (iminor(inode)) {
case BAR0_CDEV_MINOR:
bar = 0;
break;
case BAR2_CDEV_MINOR:
bar = 2;
break;
default:
return -EINVAL;
}
//ioctl.pcie`P40_ECS_GET_BAR_SIZE`
if (cmd != P40_ECS_GET_BAR_SIZE) {
printk(P40_DIAG "invalid ioctl command\n", P40_PARM);
return -EINVAL;
}
printk(P40_INFO "ECS BAR size is %lu\n", P40_PARM, state->common->bar_size[bar]);
return state->common->bar_size[bar];
}
//+`ecs_mmap`
static int ecs_mmap(struct file* filp, struct vm_area_struct* vma)//;?>
{
int rc = 0;
struct pcie40_ecs_state *state = filp->private_data;
int bar;
switch (iminor(filp->f_path.dentry->d_inode)) {
case BAR0_CDEV_MINOR:
bar = 0;
break;
case BAR2_CDEV_MINOR:
bar = 2;
break;
default:
return -EINVAL;
}
//vma->vm_flags |= VM_IO | VM_RESERVED; //VM_DONTEXPAND | VM_DONTDUMP; for 3.11
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
printk(P40_INFO "VIRT=%lx PHYS=%lx SIZE=%lu\n", P40_PARM,
vma->vm_start, state->common->bar_start[bar], vma->vm_end - vma->vm_start);
//this needs a more recent kernel than what we have now
//rc = vm_iomap_memory(vma, vma->vm_start, vma->vm_end - vma->vm_start);
rc = io_remap_pfn_range(vma, vma->vm_start,
state->common->bar_start[bar] >> PAGE_SHIFT, vma->vm_end - vma->vm_start,
vma->vm_page_prot);
if (rc) {
printk(P40_DIAG "io_remap_pfn_range()\n", P40_PARM);
return rc;
}
//vma->vm_ops = &bar_vm_ops;
return 0;
}
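// A userspace sketch of how these two entry points combine: the ioctl reports
// the BAR size, which then serves as the mmap() length (fd is assumed to be an
// open _bar0 device file; illustration only):
/*
	long bar_bytes = ioctl(fd, P40_ECS_GET_BAR_SIZE, 0);
	if (bar_bytes > 0) {
		volatile uint32_t *regs = mmap(NULL, bar_bytes,
		                               PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		// registers are now accessible as regs[word_offset]
	}
*/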
static struct file_operations ecs_file_ops = {
.owner = THIS_MODULE,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
.ioctl = ecs_ioctl,
#else
.unlocked_ioctl = ecs_ioctl,
#endif
.mmap = ecs_mmap,
.open = ecs_open,
.release = ecs_release,
};
//+`pcie40_ecs_probe` Initialize ECS BARs and create device files.
int pcie40_ecs_probe(struct pci_dev *dev, const struct pci_device_id *id)//;?>
{
int rc = 0;
struct pcie40_state *common;
struct pcie40_ecs_state *state = NULL;
common = pci_get_drvdata(dev);
//? This function allocates a p40driver`pcie40_ecs_state` instance to keep ECS-specific state.
state = kzalloc(sizeof(struct pcie40_ecs_state), GFP_KERNEL);
if (!state) {
printk(P40_ERR "kzalloc()\n", P40_PARM);
rc = -ENOMEM;
goto err_kzalloc;
}
state->common = common;
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
//? It then requests exclusive access to the ECS BARs
rc = pci_request_selected_regions_exclusive(dev, P40_ECS_BARS_MASK, P40_DRV_NAME);
if (rc) {
printk(P40_WARN "unable to reserve ECS regions\n", P40_PARM);
goto err_pci_request_regions_ecs;
}
//? and allocates a range of minor numbers for its character devices.
rc = alloc_chrdev_region(&(state->dev_num), P40_ECS_CDEV_BASEMINOR, P40_ECS_CDEV_COUNT, P40_DRV_NAME);
if (rc < 0) {
printk(P40_ERR "alloc_chrdev_region()\n", P40_PARM);
goto err_alloc_chrdev_region;
}
//? One such device is created for BAR0
if (state->common->bar_size[0]) {
rc = pcie40_setup_cdev(pcie40_ecs_class, &(state->bar0_cdev), state->dev_num, BAR0_CDEV_MINOR, 0, BAR0_CDEV_NAME, state->common->dev_id, &ecs_file_ops);
if (rc < 0) {
goto err_bar0_dev;
}
}
//? and a second one for BAR2.
if (state->common->bar_size[2]) {
rc = pcie40_setup_cdev(pcie40_ecs_class, &(state->bar2_cdev), state->dev_num, BAR2_CDEV_MINOR, 2, BAR2_CDEV_NAME, state->common->dev_id, &ecs_file_ops);
if (rc < 0) {
goto err_bar2_dev;
}
}
pcie40_ecs_set_drvdata(dev, state);
return rc;
err_bar2_dev:
if (state->common->bar_size[0]) {
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR0_CDEV_NAME);
device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR0_CDEV_MINOR));
}
err_bar0_dev:
unregister_chrdev_region(state->dev_num, P40_ECS_CDEV_COUNT);
err_alloc_chrdev_region:
pci_release_selected_regions(dev, P40_ECS_BARS_MASK);
err_pci_request_regions_ecs:
kfree(state);
err_kzalloc:
return rc;
}
//+`pcie40_ecs_remove` Destroy ECS device files.
void pcie40_ecs_remove(struct pci_dev *dev)//;?>
{
struct pcie40_ecs_state *state;
printk(P40_DIAG "pci_dev = 0x%p\n", P40_PARM, dev);
if (!dev)
return;
state = pcie40_ecs_get_drvdata(dev);
if (!state) {
printk(P40_DIAG "remove(dev = 0x%p) dev->driver_data = 0x%p\n", P40_PARM, dev, state);
return;
}
if (state->common->bar_size[2]) {
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR2_CDEV_NAME);
device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR2_CDEV_MINOR));
}
if (state->common->bar_size[0]) {
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR0_CDEV_NAME);
device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR0_CDEV_MINOR));
}
unregister_chrdev_region(state->dev_num, P40_ECS_CDEV_COUNT);
printk(P40_INFO "releasing PCI regions\n", P40_PARM);
pci_release_selected_regions(dev, P40_ECS_BARS_MASK);
kfree(state);
}
#ifndef __PCIE40_DRIVER_ECS_H
#define __PCIE40_DRIVER_ECS_H
//p40driver``+
#include "pcie40_driver_common.h"
#include <linux/fs.h>
#define P40_ECS_CDEV_BASEMINOR (0)
#define P40_ECS_CDEV_COUNT (2)
#define P40_ECS_BARS_MASK ((1<<0)|(1<<2)) //BAR0, BAR2
//ug`pcie40_driver.files`bar0 _ /dev/pcie40_?_bar0::
// Device to access user registers on the FPGA.
#define BAR0_CDEV_MINOR (P40_ECS_CDEV_BASEMINOR + 0)
static const char BAR0_CDEV_NAME[] = "bar0";
//ug`pcie40_driver.files`bar2 _ /dev/pcie40_?_bar2::
// Device to access low-level registers on the FPGA.
#define BAR2_CDEV_MINOR (P40_ECS_CDEV_BASEMINOR + 1)
static const char BAR2_CDEV_NAME[] = "bar2";
//+`pcie40_ecs_state`
struct pcie40_ecs_state {
struct pcie40_state *common;
dev_t dev_num; //base MAJOR/MINOR numbers for device files
struct cdev bar0_cdev;
struct cdev bar2_cdev;
};
//+`ecs_open`
static int ecs_open(struct inode *inode, struct file *filp)//;?>
{
struct pcie40_ecs_state *state = NULL;
switch (iminor(inode)) {
case BAR0_CDEV_MINOR:
printk(P40_INFO "BAR0", P40_PARM);
state = container_of(inode->i_cdev, struct pcie40_ecs_state, bar0_cdev);
break;
case BAR2_CDEV_MINOR:
printk(P40_INFO "BAR2", P40_PARM);
state = container_of(inode->i_cdev, struct pcie40_ecs_state, bar2_cdev);
break;
default:
printk(P40_INFO "invalid BAR", P40_PARM);
return -EINVAL;
}
filp->private_data = state;
return 0;
}
//+`ecs_release`
static int ecs_release(struct inode *inode, struct file *filp)//;?>
{
struct pcie40_ecs_state *state = NULL;
switch (iminor(inode)) {
case BAR0_CDEV_MINOR:
printk(P40_INFO "BAR0", P40_PARM);
state = container_of(inode->i_cdev, struct pcie40_ecs_state, bar0_cdev);
break;
case BAR2_CDEV_MINOR:
printk(P40_INFO "BAR2", P40_PARM);
state = container_of(inode->i_cdev, struct pcie40_ecs_state, bar2_cdev);
break;
default:
printk(P40_INFO "invalid BAR", P40_PARM);
return -EINVAL;
}
if (filp->private_data != state) {
printk(P40_ERR "inconsistent private_data\n", P40_PARM);
return -EINVAL;
}
filp->private_data = NULL;
return 0;
}
static struct class *pcie40_ecs_class = NULL;
//+`pcie40_ecs_init` Register ECS device class.
int pcie40_ecs_init(void)//;?>
{
int rc = 0;
//? This functions registers a dedicated device class used to create ECS device files.
pcie40_ecs_class = class_create(THIS_MODULE, PCIE40_ECS_CLASS);
if (IS_ERR(pcie40_ecs_class)) {
rc = PTR_ERR(pcie40_ecs_class);
printk(P40_WARN "failed to register class, %d\n", P40_PARM, rc);
goto err_class_create;
}
//pcie40_ecs_class->dev_uevent = pcie40_dev_uevent;
pcie40_ecs_class->devnode = pcie40_devnode;
err_class_create:
return rc;
}
//+`pcie40_ecs_exit` Destroy ECS device class.
void pcie40_ecs_exit(void)//;?>
{
class_destroy(pcie40_ecs_class);
}
#endif//__PCIE40_DRIVER_ECS_H
//p40fpga``+
//ug`pcie40_driver.description`
// ``pcie40.ko`` is the driver for the PCIe40 data acquisition card.
//dg`wt.pcie40.fpga.init` When the kernel module is loaded, p40fpga`pcie40_init` is immediately called. Its dual at unload time is p40fpga`pcie40_exit` . The actual PCI device management happens in p40fpga`pcie40_probe` and p40fpga`pcie40_remove` .
#define P40_FMT "P40:%s(): "
#include "pcie40_driver_common.h"
#include <linux/init.h>
#include <linux/module.h>
//ug`pcie40_driver.synopsis`
// modprobe *pcie40* [ mainmibs=_M_ ] [ metamibs=_M_ ]
static LIST_HEAD(pcie40_inst_list);
static DEFINE_SPINLOCK(pcie40_inst_list_lock);
static unsigned long pcie40_inst_list_lock_flags;
//+`pcie40_ids` <?
static const struct pci_device_id pcie40_ids[] = {
{ PCI_DEVICE(0x10DC, 0xCE40), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pcie40_ids);//?>
//ug`pcie40_driver.sysfs`link _ /sys/devices/.../pcie40_link::
// Link identifier for this interface within one PCIe40 board (0 for the primary and 1 for the secondary).
static ssize_t attr_show_link(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pci = container_of(dev, struct pci_dev, dev);
struct pcie40_state *state = pci_get_drvdata(pci);
return sprintf(buf, "%d", state ? state->link_id : -1);
}
static DEVICE_ATTR(pcie40_link, S_IRUGO, attr_show_link, NULL);
//ug`pcie40_driver.sysfs`interface _ /sys/devices/.../pcie40_interface::
// Interface identifier allocated by the driver, this value uniquely identifies a PCIe40 interface within the machine.
static ssize_t attr_show_interface(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pci = container_of(dev, struct pci_dev, dev);
struct pcie40_state *state = pci_get_drvdata(pci);
return sprintf(buf, "%d", state ? state->dev_id : -1);
}
static DEVICE_ATTR(pcie40_interface, S_IRUGO, attr_show_interface, NULL);
//ug`pcie40_driver.sysfs`loaded _ /sys/devices/.../pcie40_loaded::
// 1 if the FPGA BARs are readable, 0 if the FPGA has been reprogrammed and the driver must be reloaded.
static ssize_t attr_show_loaded(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pci = container_of(dev, struct pci_dev, dev);
struct pcie40_state *state = pci_get_drvdata(pci);
return sprintf(buf, "%d", pcie40_device_accessible(state) ? 1 : 0);
}
static DEVICE_ATTR(pcie40_loaded, S_IRUGO, attr_show_loaded, NULL);
int pcie40_ecs_init(void);
void pcie40_ecs_exit(void);
int pcie40_ecs_probe(struct pci_dev *dev, const struct pci_device_id *id);
void pcie40_ecs_remove(struct pci_dev *dev);
int pcie40_daq_init(void);
void pcie40_daq_exit(void);
int pcie40_daq_probe(struct pci_dev *dev, const struct pci_device_id *id);
void pcie40_daq_remove(struct pci_dev *dev);
//+`pcie40_probe` Called by the PCI core for each detected PCIe40 interface.
static int pcie40_probe(struct pci_dev *dev, const struct pci_device_id *id)//;?>
{
int rc = 0;
struct pcie40_state *state = NULL, *li, *lp = NULL;
int bar;
printk(P40_DIAG "found PCI device, vendor: %08X device: %08X\n", P40_PARM, id->vendor, id->device);
//? This function allocates a p40driver`pcie40_state` instance used to track the state of this device within the kernel.
state = kzalloc(sizeof(struct pcie40_state), GFP_KERNEL);
if (!state) {
printk(P40_ERR "kzalloc() failed\n", P40_PARM);
rc = -ENOMEM;
goto err_kzalloc;
}
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
INIT_LIST_HEAD(&state->list);
rc = pci_enable_device(dev);
if (rc) {
printk(P40_DIAG "pci_enable_device() failed\n", P40_PARM);
goto err_pci_enable_device;
}
pci_set_master(dev); // Without this, no interrupts will be received!!!
//"The key difference that _exclusive makes it that userspace is explicitly not allowed to map the resource via /dev/mem or sysfs."
rc = pci_request_selected_regions_exclusive(dev, P40_COMMON_BARS_MASK, P40_DRV_NAME);
if (rc) {
printk(P40_WARN "unable to reserve DAQ regions\n", P40_PARM);
goto err_pci_request_regions_daq;
}
printk(P40_INFO "initializing BARs\n", P40_PARM);
//? The state is initialized with the position and size of all PCI BARs.
for (bar = 0; bar < P40_MAX_BAR; ++bar) {
state->bar_start[bar] = pci_resource_start(dev, bar);
state->bar_size[bar] = pci_resource_len(dev, bar);
//TODO: print BAR information
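//? One way to implement the TODO above (a sketch; it assumes the two fields
//? are unsigned long, as the ``%lu`` formats used elsewhere in this file suggest):
//printk(P40_INFO "BAR%d: start 0x%lx, %lu bytes\n", P40_PARM, bar, state->bar_start[bar], state->bar_size[bar]);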
}
if (!state->bar_size[1]) {
printk(P40_ERR "no BAR1 detected!\n", P40_PARM);
rc = -ENODEV;
goto err_no_bar1;
}
//? BAR0, if present, is mapped inside the kernel to be accessible by the SCA interface (in addition, both BAR0 and BAR2 are accessible by userspace via memory mapped access).
if (state->bar_start[0] && state->bar_size[0]) {
printk(P40_INFO "pci_iomap() BAR0 (%lu bytes)\n", P40_PARM, state->bar_size[1]);
state->bar0_regs = pci_iomap(dev, 0, state->bar_size[0]);
if (state->bar0_regs == NULL) {
rc = -ENOMEM;
goto err_bar0_iomap;
}
}
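//? A userspace mmap of BAR0 or BAR2, as mentioned above, would look roughly
//? like this (the device node name below is hypothetical; the real nodes are
//? created by the ECS subdriver):
#if 0 /* userspace example, not part of this module */
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
int read_first_reg(void)
{
volatile uint32_t *regs;
uint32_t value;
int fd = open("/dev/pcie40_0_bar0", O_RDWR); /* hypothetical node name */
if (fd < 0)
return -1;
regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (regs == MAP_FAILED) {
close(fd);
return -1;
}
value = regs[0]; /* first 32-bit register of the BAR */
munmap((void *)regs, 4096);
close(fd);
return (int)value;
}
#endif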
//? BAR1 is always mapped inside the kernel as it's used directly by DAQ interface.
printk(P40_INFO "pci_iomap() BAR1 (%lu bytes)\n", P40_PARM, state->bar_size[1]);
state->bar1_regs = pci_iomap(dev, 1, state->bar_size[1]);
if (state->bar1_regs == NULL) {
rc = -ENOMEM;
goto err_bar1_iomap;
}
//? Using this mapping, the driver ensures that PCIe registers on the FPGA can be accessed.
if (!pcie40_device_accessible(state)) {
rc = -ENODEV;
printk(P40_ERR "device detected but unreadable; please re-enumerate the PCI bus to continue\n", P40_PARM);
goto err_access;
}
spin_lock_irqsave(&pcie40_inst_list_lock, pcie40_inst_list_lock_flags);
//? Then it reads the regmap`pcie.dma_ctrl.link_id` register to identify which PCIe link from the FPGA is being probed.
state->link_id = pcie40_read32_ctrl(state, P40_DMA_CTRL_OFF_LINK_ID);
//? Using this information, a unique interface identifier is allocated to the PCIe link.
if (state->link_id == 0) {
state->dev_id = 0; //? Interfaces with PCIe link 0 get an even interface id.
} else {
state->dev_id = 1; //? Interfaces on PCIe link 1 get an odd interface id.
}
//? The driver always allocates the lowest available interface id.
list_for_each_entry(li, &pcie40_inst_list, list) {
if ((state->dev_id & 1) == (li->dev_id & 1)) {
if (state->dev_id == li->dev_id) {
state->dev_id += 2;
}
}
if (lp) {
if (state->dev_id < lp->dev_id) {
list_add_tail(&state->list, &lp->list);
break;
}
}
if (state->dev_id < li->dev_id) {
list_add_tail(&state->list, &li->list);
break;
}
lp = li;
}
if (list_empty(&state->list)) {
list_add_tail(&state->list, &pcie40_inst_list);
}
spin_unlock_irqrestore(&pcie40_inst_list_lock, pcie40_inst_list_lock_flags);
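//? Worked example of the policy above: with two boards installed, link 0 of
//? the first board gets interface id 0 and link 1 gets id 1; the second
//? board's links then get ids 2 and 3, whichever order they are probed in.
//? The same lowest-free-id-per-parity policy could also be expressed with one
//? ``ida`` allocator per link, sketched below (an alternative, not the code
//? used by this driver):
#if 0 /* illustrative sketch, not built */
#include <linux/idr.h>
static DEFINE_IDA(pcie40_link0_ida); /* interfaces on PCIe link 0: even ids */
static DEFINE_IDA(pcie40_link1_ida); /* interfaces on PCIe link 1: odd ids */
static int pcie40_alloc_dev_id(int link_id)
{
struct ida *ida = link_id ? &pcie40_link1_ida : &pcie40_link0_ida;
int slot = ida_simple_get(ida, 0, 0, GFP_KERNEL); /* lowest free slot */
if (slot < 0)
return slot;
return 2 * slot + (link_id ? 1 : 0);
}
#endif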
state->pci_dev = dev;
pci_set_drvdata(dev, state);
//? Finally it calls the probing logic of the subdrivers via p40fpga`pcie40_ecs_probe` and p40fpga`pcie40_daq_probe` .
pcie40_ecs_probe(dev, id);
pcie40_daq_probe(dev, id);
device_create_file(&dev->dev, &dev_attr_pcie40_link);
device_create_file(&dev->dev, &dev_attr_pcie40_interface);
device_create_file(&dev->dev, &dev_attr_pcie40_loaded);
//? After initializing the subdrivers, this function always returns success; this ensures that p40fpga`pcie40_remove` is always called, even when only some subdrivers loaded.
return 0;
err_access:
if (state->bar_size[1]) {
iounmap(state->bar1_regs);
state->bar1_regs = NULL;
}
err_bar1_iomap:
if (state->bar0_regs) {
iounmap(state->bar0_regs);
state->bar0_regs = NULL;
}
err_bar0_iomap:
err_no_bar1:
pci_release_selected_regions(dev, P40_COMMON_BARS_MASK);
err_pci_request_regions_daq:
pci_disable_device(dev);
err_pci_enable_device:
kfree(state);
err_kzalloc:
return rc;
}
//+`pcie40_remove` Release driver state when a PCIe40 board is removed.
static void pcie40_remove(struct pci_dev *dev)//;?>
{
struct pcie40_state *state = pci_get_drvdata(dev);
spin_lock_irqsave(&pcie40_inst_list_lock, pcie40_inst_list_lock_flags);
list_del(&state->list);
spin_unlock_irqrestore(&pcie40_inst_list_lock, pcie40_inst_list_lock_flags);
device_remove_file(&dev->dev, &dev_attr_pcie40_loaded);
device_remove_file(&dev->dev, &dev_attr_pcie40_interface);
device_remove_file(&dev->dev, &dev_attr_pcie40_link);
//? First the submodules are uninitialized using p40fpga`pcie40_daq_remove` and p40fpga`pcie40_ecs_remove` .
pcie40_daq_remove(dev);
pcie40_ecs_remove(dev);
//? BAR0 and BAR1 are unmapped using ``iounmap``.
if (state->bar0_regs) {
iounmap(state->bar0_regs);
state->bar0_regs = NULL;
}
if (state->bar_size[1]) {
iounmap(state->bar1_regs);
state->bar1_regs = NULL;
}
printk(P40_INFO "releasing PCI regions\n", P40_PARM);
pci_release_selected_regions(dev, P40_COMMON_BARS_MASK);
//? Finally the PCI device is disabled
pci_disable_device(dev);
//? and the p40driver`pcie40_state` memory is freed.
kfree(state);
}
static struct pci_driver pcie40_pci_driver = {
.name = P40_DRV_NAME,
.id_table = pcie40_ids,
.probe = pcie40_probe,
.remove = pcie40_remove,
};
//+`pcie40_init` Initialize subdrivers and register PCIe driver with kernel.
static int __init pcie40_init(void)//;?>
{
int rc = 0;
//? The first module to be initialized is the ECS, using p40driver`pcie40_ecs_init` .
rc = pcie40_ecs_init();
if (rc < 0)
return rc;
//? Followed by the DAQ, using p40driver`pcie40_daq_init` .
rc = pcie40_daq_init();
if (rc < 0)
goto err_daq_init;
//? The driver is registered with the kernel using ``pci_register_driver``; its argument also contains the PCI device ids that correspond to the PCIe40 firmware (see p40fpga`pcie40_ids` ).
rc = pci_register_driver(&pcie40_pci_driver);
if (rc < 0)
goto err_pci_register_driver;
return 0;
//? If a later step fails, the already initialized subdrivers are shut down again in reverse order, so nothing is leaked.
err_pci_register_driver:
pcie40_daq_exit();
err_daq_init:
pcie40_ecs_exit();
return rc;
}
//+`pcie40_exit` Unregister PCIe driver and uninitialize subdrivers.
static void __exit pcie40_exit(void)//?>
{
pci_unregister_driver(&pcie40_pci_driver);
pcie40_daq_exit();
pcie40_ecs_exit();
}
//+`pcie40_init`
module_init(pcie40_init);
//+`pcie40_exit`
module_exit(pcie40_exit);
MODULE_VERSION(DAQ40_VER_REL);
MODULE_LICENSE("GPL");
//TODO: MODULE_AUTHOR
//TODO: MODULE_DESCRIPTION