Skip to content
Snippets Groups Projects
Commit c8e20d0b authored by Patrick Robbe's avatar Patrick Robbe
Browse files

Remove old file

parent dc8290c4
No related branches found
No related tags found
No related merge requests found
#ifndef __PCIE40_DRIVER_DAQ_H
#define __PCIE40_DRIVER_DAQ_H
//p40driver``+
#include "common.h"
#include <linux/fs.h>
#include <linux/mm_types.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include "pcie40_ioctl.h"
// Character-device layout: the DAQ devices occupy 4 minors starting at 2
// (ctrl, main, meta, odin -- see the *_CDEV_MINOR definitions below).
#define P40_DAQ_CDEV_BASEMINOR (2)
#define P40_DAQ_CDEV_COUNT (4)
#define MAIN_MAP_MAX_ENTRIES (1024) // This should be read from the FPGA
//ug`pcie40_driver.options`mainmibs *mainmibs* = _M_::
// Amount of memory (in MiBs) to allocate for each _main_ stream circular buffer. The driver will try to allocate this much memory but the final DMA buffer might be smaller in case of memory pressure from the DMA allocator. The default (and maximum) value is 4096.
#define MAIN_BUF_MIBS_MAX (4096)
// Read-only module parameter (S_IRUSR|S_IRGRP|S_IROTH: visible in sysfs,
// settable only at load time).
static int mainmibs = MAIN_BUF_MIBS_MAX;
module_param(mainmibs, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mainmibs, "Desired size (MiB) of main stream circular buffer");
#define META_MAP_MAX_ENTRIES (32) // This should be read from the FPGA
//ug`pcie40_driver.options`metamibs *metamibs* = _M_::
// Amount of memory (in MiBs) to allocate for each _meta_ stream circular buffer. The default (and maximum) value is 128.
#define META_BUF_MIBS_MAX (128) // TODO: this could potentially be reduced to 64
static int metamibs = META_BUF_MIBS_MAX;
module_param(metamibs, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(metamibs, "Desired size (MiB) of meta stream circular buffer");
//ug`pcie40_driver.files`ctrl _ /dev/pcie40_?_ctrl::
// Device to access the DMA controller.
#define CTRL_CDEV_MINOR (P40_DAQ_CDEV_BASEMINOR + 0)
static const char CTRL_CDEV_NAME[] = "ctrl";
//ug`pcie40_driver.files`main _ /dev/pcie40_?_main::
// Device to access the _main_ DMA stream.
#define MAIN_CDEV_MINOR (P40_DAQ_CDEV_BASEMINOR + 1)
static const char MAIN_CDEV_NAME[] = "main";
//ug`pcie40_driver.files`meta _ /dev/pcie40_?_meta::
// Device to access the _meta_ DMA stream.
#define META_CDEV_MINOR (P40_DAQ_CDEV_BASEMINOR + 2)
static const char META_CDEV_NAME[] = "meta";
////ug`pcie40_driver.files`odin _ /dev/pcie40_?_odin::
//// Device to access the _odin_ DMA stream.
#define ODIN_CDEV_MINOR (P40_DAQ_CDEV_BASEMINOR + 3)
static const char ODIN_CDEV_NAME[] = "odin";
// One physically contiguous DMA buffer of the host circular buffer.
struct pcie40_dma_buffer {
  void *ptr;        // kernel virtual address of the buffer
  dma_addr_t start; // DMA (bus) address as seen by the FPGA
  size_t size;      // buffer size in bytes
};
// Scatter list of DMA buffers forming one logical circular buffer, mirrored
// into an FPGA-side table at `base`.
struct pcie40_dma_map {
  void __iomem* base;  // FPGA map table (BAR-mapped registers)
  size_t max_entries;  // capacity of `entries`
  size_t num_entries;  // entries actually allocated
  struct pcie40_dma_buffer *entries; // per-buffer descriptors
  size_t size;         // total bytes across all entries
};
struct pcie40_daq_state;
//+`pcie40_dma_stream` Kernelspace representation of a DMA stream between the PCIe40 FPGA and upstream memory.
struct pcie40_dma_stream {
  const char *cdev_name;  // suffix of /dev/pcie40_?_<name>
  int8_t cdev_minor;      // minor offset relative to the device's base minor
  struct cdev cdev;
  struct pcie40_dma_map map; // host circular buffer backing this stream
  size_t regs_base;       // offset of this stream's register block in BAR1
  uint32_t write_off;     // cached copy of the FPGA write pointer
  uint32_t read_off;      // cached copy of the host read pointer
  int msi;                // MSI vector used by this stream
  ktime_t msi_last;       // timestamp of the last MSI
  ktime_t msi_delay;      // time between the two most recent MSIs
  int msi_count;          // MSIs received since the last reset
  // This lock prevents the read pointer to be changed when we're calculating how much data is available in the circular buffer (the write buffer can be advanced by the FPGA but this does not invalidate the results)
  spinlock_t off_lock;
  unsigned long off_flags; // saved IRQ flags for off_lock
  struct pcie40_daq_state *state; // back-pointer to the owning device state
};
// Per-board DAQ state: the control device plus the three DMA streams.
struct pcie40_daq_state {
  struct pcie40_state *common; // shared PCI/BAR state (declared in common.h)
  int msi_base;   // first MSI vector assigned to this board
  size_t msi_span; // number of MSI vectors
  wait_queue_head_t wait; // woken on stream MSIs; used by poll()
  dev_t dev_num; //base MAJOR/MINOR numbers for device files
  struct cdev ctrl_cdev; // /dev/pcie40_?_ctrl
  struct pcie40_dma_stream main_stream;
  struct pcie40_dma_stream meta_stream;
  struct pcie40_dma_stream odin_stream;
  u8 irq_line;
  u8 irq_pin;
#ifdef PCIE40_EMU
  struct task_struct *emu_thread; // kthread emulating FPGA DMA writes
#endif
};
// Device class under which all DAQ character devices are registered
// (created in pcie40_daq_init(), destroyed in pcie40_daq_exit()).
static struct class *pcie40_daq_class = NULL;
// ioctl handler for /dev/pcie40_?_ctrl: exposes the DMA controller register
// block.  GET commands read a register (or a cached host value) and copy it
// to the u64 at *arg; SET commands read *arg and write the register.
// Returns 0 on success, -EFAULT if the user pointer is inaccessible,
// -EINVAL for an unknown command.
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
static int ctrl_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#else
static long ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#endif //+`ctrl_ioctl`
{
  struct pcie40_daq_state *state = filp->private_data; // set by ctrl_open()
  uint64_t __user *arg_ptr = (uint64_t __user *)arg;
  int err = 0;
  uint64_t value;
  // Validate the user pointer once up front; the unchecked __put_user/
  // __get_user calls below rely on this.  NOTE(review): the 3-argument
  // access_ok(VERIFY_*) form was removed in Linux 5.0 -- confirm the range
  // of kernels this driver must build against.
  if (_IOC_DIR(cmd) & _IOC_READ)
    err = !access_ok(VERIFY_WRITE, arg_ptr, _IOC_SIZE(cmd));
  else if (_IOC_DIR(cmd) & _IOC_WRITE)
    err = !access_ok(VERIFY_READ, arg_ptr, _IOC_SIZE(cmd));
  if (err) return -EFAULT;
  switch(cmd) {
  case P40_CTRL_GET_RWTEST: //ioctl.pcie`P40_CTRL_GET_RWTEST`
    // See regmap`pcie.dma_ctrl.rwtest` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_RWTEST);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_RWTEST: //ioctl.pcie`P40_CTRL_SET_RWTEST`
    // See regmap`pcie.dma_ctrl.rwtest` .
    __get_user(value, arg_ptr);
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_RWTEST, value);
    break;
  case P40_CTRL_GET_VERSION: //ioctl.pcie`P40_CTRL_GET_VERSION`
    // See regmap`pcie.dma_ctrl.version` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_VERSION);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_LINK_ID: //ioctl.pcie`P40_CTRL_GET_LINK_ID`
    // The value returned by this IOCTL encodes both the PCI topological address and the FPGA interface number, as follows:
    // _ bits [31..24]::
    // PCI bus number
    // _ bits [23..16]::
    // PCI slot number
    // _ bits [15..8]::
    // PCI function number
    // _ bits [7..0]::
    // FPGA-PCIe interface number within a given board (0 for the primary link and 1 for the secondary)
    value = state->common->link_id;
    if (state->common->pci_dev) { // pci_dev is NULL in the emulated build
      value |= state->common->pci_dev->bus->number << 24;
      value |= PCI_SLOT(state->common->pci_dev->devfn) << 16;
      value |= PCI_FUNC(state->common->pci_dev->devfn) << 8;
    }
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_CHIP_ID: //ioctl.pcie`P40_CTRL_GET_CHIP_ID`
    // See regmap`pcie.dma_ctrl.chip_id` .
    // 64-bit registers are exposed as a HI/LO pair: value = HI<<32 | LO.
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHIP_ID_HI);
    value <<= 32;
    value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHIP_ID_LO);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_RESET: //ioctl.pcie`P40_CTRL_GET_RESET`
    // See regmap`pcie.dma_ctrl.reset` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_RESET);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_RESET: //ioctl.pcie`P40_CTRL_SET_RESET`
    // See regmap`pcie.dma_ctrl.reset` .
    __get_user(value, arg_ptr);
    if (value) {
      // Entering reset: restart the per-stream MSI statistics.
      state->main_stream.msi_count = 0;
      state->meta_stream.msi_count = 0;
    }
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_RESET, value);
    break;
  case P40_CTRL_GET_ERROR: //ioctl.pcie`P40_CTRL_GET_ERROR`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_ERROR);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_MAIN_GEN_CTL: //ioctl.pcie`P40_CTRL_GET_MAIN_GEN_CTL`
    // See regmap`pcie.dma_ctrl.main_gen_ctl` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_CTL);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_MAIN_GEN_CTL: //ioctl.pcie`P40_CTRL_SET_MAIN_GEN_CTL`
    // See regmap`pcie.dma_ctrl.main_gen_ctl` .
    __get_user(value, arg_ptr);
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_CTL, value);
    break;
  case P40_CTRL_GET_MAIN_GEN_FIXED: //ioctl.pcie`P40_CTRL_GET_MAIN_GEN_FIXED`
    // See regmap`pcie.dma_ctrl.main_gen_fixed` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_FIXED);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_MAIN_GEN_FIXED: //ioctl.pcie`P40_CTRL_SET_MAIN_GEN_FIXED`
    // See regmap`pcie.dma_ctrl.main_gen_fixed` .
    __get_user(value, arg_ptr);
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_FIXED, value);
    break;
  case P40_CTRL_GET_MAIN_RAW_MODE: //ioctl.pcie`P40_CTRL_GET_MAIN_RAW_MODE`
    // See regmap`pcie.dma_ctrl.main_raw_mode` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_RAW_MODE);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_META_PACKING: //ioctl.pcie`P40_CTRL_GET_META_PACKING`
    // See regmap`pcie.dma_ctrl.meta_packing` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_PACKING);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_META_PACKING: //ioctl.pcie`P40_CTRL_SET_META_PACKING`
    __get_user(value, arg_ptr);
    // See regmap`pcie.dma_ctrl.meta_packing` .
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_META_PACKING, value);
    break;
  case P40_CTRL_GET_PCIE_GEN: //ioctl.pcie`P40_CTRL_GET_PCIE_GEN`
    // See regmap`pcie.dma_ctrl.pcie_gen` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_PCIE_GEN);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_MSI_MODE: //ioctl.pcie`P40_CTRL_GET_MSI_MODE`
    // See regmap`pcie.dma_ctrl.msi_mode` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MSI_MODE);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_MSI_MODE: //ioctl.pcie`P40_CTRL_SET_MSI_MODE`
    // See regmap`pcie.dma_ctrl.msi_mode` .
    __get_user(value, arg_ptr);
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MSI_MODE, value);
    break;
  case P40_CTRL_GET_MAIN_MSI_BYTES: //ioctl.pcie`P40_CTRL_GET_MAIN_MSI_BYTES`
    // See regmap`pcie.dma_ctrl.main_msi_bytes` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_MSI_BYTES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_MAIN_MSI_BYTES: //ioctl.pcie`P40_CTRL_SET_MAIN_MSI_BYTES`
    __get_user(value, arg_ptr);
    // See regmap`pcie.dma_ctrl.main_msi_bytes` .
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_MSI_BYTES, value);
    break;
  case P40_CTRL_GET_MAIN_MSI_CYCLES: //ioctl.pcie`P40_CTRL_GET_MAIN_MSI_CYCLES`
    // See regmap`pcie.dma_ctrl.main_msi_cycles` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_MSI_CYCLES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_META_MSI_BYTES: //ioctl.pcie`P40_CTRL_GET_META_MSI_BYTES`
    // See regmap`pcie.dma_ctrl.meta_msi_bytes` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_BYTES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_META_MSI_BYTES: //ioctl.pcie`P40_CTRL_SET_META_MSI_BYTES`
    // See regmap`pcie.dma_ctrl.meta_msi_bytes` .
    __get_user(value, arg_ptr);
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_BYTES, value);
    break;
  case P40_CTRL_GET_META_MSI_CYCLES: //ioctl.pcie`P40_CTRL_GET_META_MSI_CYCLES`
    // See regmap`pcie.dma_ctrl.meta_msi_cycles` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_CYCLES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_META_MSI_BLOCKS: //ioctl.pcie`P40_CTRL_GET_META_MSI_BLOCKS`
    // See regmap`pcie.dma_ctrl.meta_msi_blocks` .
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_BLOCKS);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_META_MSI_BLOCKS: //ioctl.pcie`P40_CTRL_SET_META_MSI_BLOCKS`
    // See regmap`pcie.dma_ctrl.meta_msi_blocks` .
    __get_user(value, arg_ptr);
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_META_MSI_BLOCKS, value);
    break;
  case P40_CTRL_GET_HOST_MAIN_MSI_NSECS: //ioctl.pcie`P40_CTRL_GET_HOST_MAIN_MSI_NSECS`
    // Host-measured delay between the last two main-stream MSIs.
    value = ktime_to_ns(state->main_stream.msi_delay);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_HOST_META_MSI_NSECS: //ioctl.pcie`P40_CTRL_GET_HOST_META_MSI_NSECS`
    // Host-measured delay between the last two meta-stream MSIs.
    value = ktime_to_ns(state->meta_stream.msi_delay);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_INBUF_SIZE: //ioctl.pcie`P40_CTRL_GET_INBUF_SIZE`
    // See regmap`pcie.dma_ctrl.inbuf_size`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_INBUF_SIZE);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_INBUF_FILL: //ioctl.pcie`P40_CTRL_GET_INBUF_FILL`
    // See regmap`pcie.dma_ctrl.inbuf_fill`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_INBUF_FILL);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_TRUNC_THRES: //ioctl.pcie`P40_CTRL_GET_TRUNC_THRES`
    // See regmap`pcie.dma_ctrl.trunc_thres`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_THRES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_SET_TRUNC_THRES: //ioctl.pcie`P40_CTRL_SET_TRUNC_THRES`
    // See regmap`pcie.dma_ctrl.trunc_thres`
    __get_user(value, arg_ptr);
    pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_THRES, value);
    break;
  case P40_CTRL_GET_TRUNC_TOTAL_CYCLES: //ioctl.pcie`P40_CTRL_GET_TRUNC_TOTAL_CYCLES`
    // See regmap`pcie.dma_ctrl.trunc_total_cycles`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_TOTAL_CYCLES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_TRUNC_TOTAL_SINCE_EVID: //ioctl.pcie`P40_CTRL_GET_TRUNC_TOTAL_SINCE_EVID`
    // See regmap`pcie.dma_ctrl.trunc_total_since_evid`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_TOTAL_SINCE_EVID_HI);
    value <<= 32;
    value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_TOTAL_SINCE_EVID_LO);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_TRUNC_LAST_CYCLES: //ioctl.pcie`P40_CTRL_GET_TRUNC_LAST_CYCLES`
    // See regmap`pcie.dma_ctrl.trunc_last_cycles`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_CYCLES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_TRUNC_LAST_FROM_EVID: //ioctl.pcie`P40_CTRL_GET_TRUNC_LAST_FROM_EVID`
    // See regmap`pcie.dma_ctrl.trunc_last_from_evid`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_FROM_EVID_HI);
    value <<= 32;
    value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_FROM_EVID_LO);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_TRUNC_LAST_TO_EVID: //ioctl.pcie`P40_CTRL_GET_TRUNC_LAST_TO_EVID`
    // See regmap`pcie.dma_ctrl.trunc_last_to_evid`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_TO_EVID_HI);
    value <<= 32;
    value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_TRUNC_LAST_TO_EVID_LO);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_CHOKE_TOTAL_CYCLES: //ioctl.pcie`P40_CTRL_GET_CHOKE_TOTAL_CYCLES`
    // See regmap`pcie.dma_ctrl.choke_total_cycles`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_TOTAL_CYCLES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_CHOKE_TOTAL_SINCE_EVID: //ioctl.pcie`P40_CTRL_GET_CHOKE_TOTAL_SINCE_EVID`
    // See regmap`pcie.dma_ctrl.choke_total_since_evid`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_TOTAL_SINCE_EVID_HI);
    value <<= 32;
    value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_TOTAL_SINCE_EVID_LO);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_CHOKE_LAST_CYCLES: //ioctl.pcie`P40_CTRL_GET_CHOKE_LAST_CYCLES`
    // See regmap`pcie.dma_ctrl.choke_last_cycles`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_CYCLES);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_CHOKE_LAST_FROM_EVID: //ioctl.pcie`P40_CTRL_GET_CHOKE_LAST_FROM_EVID`
    // See regmap`pcie.dma_ctrl.choke_last_from_evid`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_FROM_EVID_HI);
    value <<= 32;
    value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_FROM_EVID_LO);
    __put_user(value, arg_ptr);
    break;
  case P40_CTRL_GET_CHOKE_LAST_TO_EVID: //ioctl.pcie`P40_CTRL_GET_CHOKE_LAST_TO_EVID`
    // See regmap`pcie.dma_ctrl.choke_last_to_evid`
    value = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_TO_EVID_HI);
    value <<= 32;
    value |= pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_CHOKE_LAST_TO_EVID_LO);
    __put_user(value, arg_ptr);
    break;
  /* unfortunately this is only possible in linux 4.x
  case P40_CTL_GET_GEN:
  switch (state->common->pci_dev->bus->cur_bus_speed) {
  case PCIE_SPEED_2_5GT:
  value = 1;
  break;
  case PCIE_SPEED_5_0GT:
  value = 2;
  break;
  case PCIE_SPEED_8_0GT:
  value = 3;
  break;
  default:
  value = 0;
  break;
  }
  __put_user(value, arg_ptr);
  break;
  */
  default:
    printk(P40_ERR "invalid ioctl\n", P40_PARM);
    return -EINVAL;
  }
  return 0;
}
// poll() handler for the control device.  Currently a stub: it logs the call
// and reports no events.  NOTE(review): it never calls poll_wait(), so a
// select()/poll() on the ctrl device will busy-spin rather than sleep --
// confirm whether this device is ever polled in practice.
static unsigned int ctrl_poll(struct file *file, poll_table *wait)
{
  // Make sure device is in MSI_DAQ mode
  // (Do we really want to do this at every poll()? Well it's just a few ns,
  // and it's still better than doing it in the isr)
  printk(P40_DIAG, P40_PARM);
  return 0;
}
//+`ctrl_open`
// Open handler for /dev/pcie40_?_ctrl: recover the owning daq state from the
// embedded cdev, remember it on the file handle, and refuse the open with
// -EIO when the board cannot be reached.
static int ctrl_open(struct inode *inode, struct file *filp)//;?>
{
  struct pcie40_daq_state *daq =
    container_of(inode->i_cdev, struct pcie40_daq_state, ctrl_cdev);
  filp->private_data = daq;
  if (!pcie40_device_accessible(daq->common))
    return -EIO;
  return 0;
}
//+`ctrl_release`
// Release handler for /dev/pcie40_?_ctrl: sanity-check that private_data
// still points at the state derived from the inode, then clear it.
static int ctrl_release(struct inode *inode, struct file *filp)//;?>
{
  struct pcie40_daq_state *daq =
    container_of(inode->i_cdev, struct pcie40_daq_state, ctrl_cdev);
  if (filp->private_data == daq) {
    filp->private_data = NULL;
    return 0;
  }
  printk(P40_DIAG "inconsistent private_data\n", P40_PARM);
  return -EINVAL;
}
// File operations for the control device (/dev/pcie40_?_ctrl).
static struct file_operations ctrl_file_ops = {
  .owner = THIS_MODULE,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
  .ioctl = ctrl_ioctl, // pre-2.6.35 kernels use the BKL-protected hook
#else
  .unlocked_ioctl = ctrl_ioctl,
#endif
  .open = ctrl_open,
  .release = ctrl_release,
  .poll = ctrl_poll,
};
static int dma_map_mmap(struct pcie40_dma_map* map, struct vm_area_struct *vma);
static int dma_map_munmap(struct pcie40_dma_map* map);
static void dma_map_print(struct pcie40_dma_map* map);
static uint32_t dma_stream_get_read_off(struct pcie40_dma_stream *stream);
static void dma_stream_set_read_off(struct pcie40_dma_stream *stream, uint32_t read_off);
static uint32_t dma_stream_get_write_off(struct pcie40_dma_stream *stream);
// Number of bytes currently pending in the stream's host circular buffer,
// computed from the FPGA write pointer and the host read pointer.  The
// offsets are sampled under off_lock so the read pointer cannot move while
// the distance is computed.
static inline size_t dma_stream_get_bytes_used(struct pcie40_dma_stream *stream)
{
  uint64_t buf_bytes = stream->map.size;
  uint64_t w_off, r_off;
  spin_lock_irqsave(&stream->off_lock, stream->off_flags);
  w_off = dma_stream_get_write_off(stream); //TODO: use cached value
  r_off = dma_stream_get_read_off(stream); //TODO: use cached value
  spin_unlock_irqrestore(&stream->off_lock, stream->off_flags);
  // Account for wrap-around when the writer is behind the reader.
  return (w_off >= r_off) ? (w_off - r_off)
                          : (buf_bytes - (r_off - w_off));
}
// Number of bytes still free in the stream's host circular buffer
// (total size minus the bytes currently pending).
static inline size_t dma_stream_get_bytes_free(struct pcie40_dma_stream *stream)
{
  return stream->map.size - dma_stream_get_bytes_used(stream);
}
static inline uint32_t pcie40_read32_stream(struct pcie40_dma_stream *stream, unsigned long offset);
static inline void pcie40_write32_stream(struct pcie40_dma_stream *stream, unsigned long offset, uint32_t value);
// ioctl handler shared by the stream devices (/dev/pcie40_?_{main,meta,odin}):
// exposes the per-stream DAQ register block and the host-side buffer
// bookkeeping.  Returns 0 on success, -EFAULT on an inaccessible user
// pointer, -EINVAL for an unknown command.
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
static int dma_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#else
static long dma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#endif
//+`dma_ioctl`
{
  struct pcie40_dma_stream *stream = filp->private_data; // set by dma_open()
  uint64_t __user *arg_ptr = (uint64_t __user *)arg;
  int err = 0;
  uint64_t value;
  // Validate the user pointer once; the __put_user/__get_user calls below
  // rely on this.  NOTE(review): 3-argument access_ok() was removed in
  // Linux 5.0 -- confirm the supported kernel range.
  if (_IOC_DIR(cmd) & _IOC_READ)
    err = !access_ok(VERIFY_WRITE, arg_ptr, _IOC_SIZE(cmd));
  else if (_IOC_DIR(cmd) & _IOC_WRITE)
    err = !access_ok(VERIFY_READ, arg_ptr, _IOC_SIZE(cmd));
  if (err) return -EFAULT;
  switch(cmd) {
  case P40_STREAM_GET_ENABLE: //ioctl.pcie`P40_STREAM_GET_ENABLE`
    // See regmap`pcie.dma_daq_stream.enable`
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_SET_ENABLE: //ioctl.pcie`P40_STREAM_SET_ENABLE`
    // See regmap`pcie.dma_daq_stream.enable`
    __get_user(value, arg_ptr);
    pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE, value);
    break;
  case P40_STREAM_GET_READY: //ioctl.pcie`P40_STREAM_GET_READY`
    // See regmap`pcie.dma_daq_stream.ready`
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_READY);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_FLUSH: //ioctl.pcie`P40_STREAM_GET_FLUSH`
    // See regmap`pcie.dma_daq_stream.flush`
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FLUSH);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_SET_FLUSH: //ioctl.pcie`P40_STREAM_SET_FLUSH`
    // See regmap`pcie.dma_daq_stream.flush`
    __get_user(value, arg_ptr);
    pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FLUSH, value);
    break;
  case P40_STREAM_GET_FPGA_BUF_BYTES: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_BYTES`
    // See regmap`pcie.dma_daq_stream.fpga_buf_bytes`
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_BYTES);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_FPGA_BUF_DESCS: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESCS`
    // See regmap`pcie.dma_daq_stream.fpga_buf_descs`
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_FPGA_BUF_DESC_BYTES: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESC_BYTES`
    // See regmap`pcie.dma_daq_stream.fpga_buf_desc_bytes`
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESC_BYTES);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_FPGA_BUF_DESCS_FILL: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESCS_FILL`
    // See regmap`pcie.dma_daq_stream.fpga_buf_descs_fill`
    // 64-bit counters are exposed as a HI/LO pair: value = HI<<32 | LO.
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS_FILL_HI);
    value <<= 32;
    value |= pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS_FILL_LO);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_FPGA_BUF_DESC_FILL_BYTES: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESC_FILL_BYTES`
    // See regmap`pcie.dma_daq_stream.fpga_buf_desc_fill_bytes`
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESC_FILL_BYTES);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_FPGA_BUF_DESCS_BUSY: //ioctl.pcie`P40_STREAM_GET_FPGA_BUF_DESCS_BUSY`
    // See regmap`pcie.dma_daq_stream.fpga_buf_descs_busy`
    value = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS_BUSY_HI);
    value <<= 32;
    value |= pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_FPGA_BUF_DESCS_BUSY_LO);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_HOST_BUF_WRITE_OFF: //ioctl.pcie`P40_STREAM_GET_HOST_BUF_WRITE_OFF`
    value = dma_stream_get_write_off(stream);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_HOST_BUF_READ_OFF: //ioctl.pcie`P40_STREAM_GET_HOST_BUF_READ_OFF`
    value = dma_stream_get_read_off(stream);
    __put_user(value, arg_ptr);
    break;
  /*
  case P40_STREAM_SET_HOST_BUF_READ_OFF:
  __get_user(value, arg_ptr);
  iowrite32(value & 0xFFFFFFFF, stream->state->common->bar1_regs
  + stream->regs_base + P40_DMA_DAQ_STREAM_OFF_HOST_BUF_READ_OFF);
  break;
  */
  /*case P40_STREAM_GET_HOST_MAP_ENTRIES:
  value = ioread32(stream->state->common->bar1_regs
  + stream->regs_base + P40_DMA_DAQ_STREAM_OFF_HOST_MAP_ENTRIES);
  __put_user(value, arg_ptr);
  break;
  case P40_STREAM_SET_HOST_MAP_ENTRIES:
  __get_user(value, arg_ptr);
  iowrite32(value & 0xFFFFFFFF, stream->state->common->bar1_regs
  + stream->regs_base + P40_DMA_DAQ_STREAM_OFF_HOST_MAP_ENTRIES);
  break;*/
  case P40_STREAM_GET_HOST_BUF_BYTES: //ioctl.pcie`P40_STREAM_GET_HOST_BUF_BYTES`
    // Total size of the host circular buffer actually allocated.
    value = stream->map.size;
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_HOST_BUF_BYTES_USED: //ioctl.pcie`P40_STREAM_GET_HOST_BUF_BYTES_USED`
    value = dma_stream_get_bytes_used(stream);
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_GET_HOST_MSI_COUNT: //ioctl.pcie`P40_STREAM_GET_HOST_MSI_COUNT`
    value = stream->msi_count;
    __put_user(value, arg_ptr);
    break;
  case P40_STREAM_FREE_HOST_BUF_BYTES: //ioctl.pcie`P40_STREAM_FREE_HOST_BUF_BYTES`
  // Consumer acknowledges *arg bytes: advance the read pointer by at most
  // the number of bytes actually pending, and report the amount consumed
  // back through *arg.
  {
    uint64_t host_buf_bytes = stream->map.size;
    uint64_t host_write_off, host_read_off;
    __get_user(value, arg_ptr);
    spin_lock_irqsave(&stream->off_lock, stream->off_flags);
    host_write_off = dma_stream_get_write_off(stream); //TODO: use cached value
    host_read_off = dma_stream_get_read_off(stream); //TODO: use cached value
    if (host_write_off >= host_read_off) {
      value = min(value, host_write_off - host_read_off);
    } else {
      value = min(value, host_buf_bytes - (host_read_off - host_write_off));
    }
    dma_stream_set_read_off(stream, (host_read_off + value) % host_buf_bytes);
    spin_unlock_irqrestore(&stream->off_lock, stream->off_flags);
    __put_user(value, arg_ptr);
    break;
  }
  default:
    printk(P40_ERR "invalid ioctl\n", P40_PARM);
    return -EINVAL;
  }
  return 0;
}
//+`dma_mmap`
// mmap handler for a stream device: maps the stream's host circular buffer
// into the caller's address space.
// Fix: only attach the stream to the vma when the mapping succeeded, so a
// failed mmap does not leave a dangling vm_private_data behind.
static int dma_mmap(struct file* filp, struct vm_area_struct* vma)//;?>
{
  struct pcie40_dma_stream *stream = filp->private_data;
  int ret;
  dma_map_print(&stream->map);
  ret = dma_map_mmap(&stream->map, vma);
  if (ret == 0)
    vma->vm_private_data = stream;
  return ret;
}
//+`dma_poll`
// poll() handler for a stream device: readable when the FPGA write pointer
// has moved past the cached host read pointer; POLLERR when the board has
// become inaccessible.  Sleeps on the per-device wait queue (woken by the
// stream's MSI handler).
static unsigned int dma_poll(struct file *filp, poll_table *wait)//;?>
{
  struct pcie40_dma_stream *stream = filp->private_data;
  uint32_t host_write_off, host_read_off;
  // Sample both offsets atomically; uses the cached read_off rather than
  // re-reading the register.
  spin_lock_irqsave(&stream->off_lock, stream->off_flags);
  host_write_off = dma_stream_get_write_off(stream);
  host_read_off = stream->read_off;
  spin_unlock_irqrestore(&stream->off_lock, stream->off_flags);
  //printk(P40_INFO, P40_PARM);
  poll_wait(filp, &stream->state->wait, wait);
  if (host_write_off != host_read_off)
    return POLLIN | POLLRDNORM; // data pending in the circular buffer
  //if (dma-stopped)
  // return POLLIN | POLLRDNORM; //POLLHUP;
  if (!pcie40_device_accessible(stream->state->common))
    return POLLERR;
  return 0;
}
//+`dma_open`
// Open handler for a stream device: recover the stream from the embedded
// cdev and remember it on the file handle for the other fops.
static int dma_open(struct inode *inode, struct file *filp)//;?>
{
  struct pcie40_dma_stream *strm =
    container_of(inode->i_cdev, struct pcie40_dma_stream, cdev);
  filp->private_data = strm;
  return 0;
}
//+`dma_release`
// Release handler for a stream device: verify private_data still matches
// the stream derived from the inode, then drop the reference.
static int dma_release(struct inode *inode, struct file *filp)//;?>
{
  struct pcie40_dma_stream *strm =
    container_of(inode->i_cdev, struct pcie40_dma_stream, cdev);
  if (filp->private_data == strm) {
    filp->private_data = NULL;
    return 0;
  }
  printk(P40_DIAG "inconsistent private_data\n", P40_PARM);
  return -EINVAL;
}
// File operations shared by all stream devices (main/meta/odin).
static struct file_operations dma_file_ops = {
  .owner = THIS_MODULE,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
  .ioctl = dma_ioctl, // pre-2.6.35 kernels use the BKL-protected hook
#else
  .unlocked_ioctl = dma_ioctl,
#endif
  .mmap = dma_mmap,
  .poll = dma_poll,
  .open = dma_open,
  .release = dma_release,
};
// Read a 32-bit register from this stream's register block in BAR1.
// In the emulated build the "registers" are plain kernel memory, so they are
// accessed directly instead of through ioread32().
static inline uint32_t pcie40_read32_stream(struct pcie40_dma_stream *stream, unsigned long offset)
{
#ifndef PCIE40_EMU
  return ioread32(stream->state->common->bar1_regs + stream->regs_base + offset);
#else
  return *(uint32_t *)(stream->state->common->bar1_regs + stream->regs_base + offset);
#endif
}
// Write a 32-bit register in this stream's register block in BAR1.
// In the emulated build the "registers" are plain kernel memory, so they are
// written directly instead of through iowrite32().
static inline void pcie40_write32_stream(struct pcie40_dma_stream *stream, unsigned long offset, uint32_t value)
{
#ifndef PCIE40_EMU
  iowrite32(value, stream->state->common->bar1_regs + stream->regs_base + offset);
#else
  *(uint32_t *)(stream->state->common->bar1_regs + stream->regs_base + offset) = value;
#endif
}
//+`dma_map_mmap`
static int dma_map_mmap(struct pcie40_dma_map* map, struct vm_area_struct *vma)//?>
{
int rc;
int i = 0;
size_t mapped_bytes = 0;
unsigned long mapped_end = vma->vm_start;
printk(P40_INFO "VIRT=%lx PHYS=0x%pad SIZE=%lu\n", P40_PARM,
vma->vm_start, &(map->entries[0].start), vma->vm_end - vma->vm_start);
//TODO: make this mapping read only
if (map->num_entries <= map->max_entries) {
while (mapped_end < vma->vm_end) {
struct pcie40_dma_buffer *buffer = map->entries + i;
rc = remap_pfn_range(vma, mapped_end, buffer->start >> PAGE_SHIFT,
buffer->size, vma->vm_page_prot);
mapped_bytes += buffer->size;
mapped_end += buffer->size;
if (rc) {
printk(P40_DIAG "remap_pfn_range() failed\n", P40_PARM);
//TODO: undo all previous mappings before returning
return rc;
}
i = (i+1) % map->num_entries;
}
}
printk(P40_INFO "mapped %zu bytes\n", P40_PARM, mapped_bytes);
return 0;
}
// Counterpart of dma_map_mmap(); intentionally a no-op -- the kernel tears
// down the vma itself and the DMA buffers outlive the mapping.
static int dma_map_munmap(struct pcie40_dma_map* map)
{
  return 0;
}
// Log a summary of the DMA map (currently just the entry count; the
// per-entry dump below is disabled).
static void dma_map_print(struct pcie40_dma_map* map)
{
  printk(P40_DIAG "memory map: %zu entries\n", P40_PARM, map->num_entries);/*
  int i;
  for (i = 0; i < map->num_entries; ++i) {
  struct pcie40_dma_buffer* buffer = map->entries + i;
  printk(P40_DIAG " [%3d] base: 0x%pad size: %zu bytes\n", P40_PARM, i,
  (void *)dma_map_read_entry_base(map, i),
  dma_map_read_entry_size(map, i)
  );
  }*/
}
// Read the host-buffer read pointer from the FPGA and refresh the cached
// copy in stream->read_off.
static uint32_t dma_stream_get_read_off(struct pcie40_dma_stream *stream)
{
  // See regmap`pcie.dma_daq_stream.host_buf_read_off
  //TODO: spin lock to serialize access!
  return (stream->read_off = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_BUF_READ_OFF));
}
// Update the cached read pointer and publish it to the FPGA, freeing the
// consumed region of the circular buffer for further DMA.
static void dma_stream_set_read_off(struct pcie40_dma_stream *stream, uint32_t read_off)
{
  // See regmap`pcie.dma_daq_stream.host_buf_read_off`
  //TODO: spin lock to serialize access!
  stream->read_off = read_off;
  pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_BUF_READ_OFF, read_off);
}
// Read the host-buffer write pointer (advanced by the FPGA as it DMAs data)
// and refresh the cached copy in stream->write_off.
static uint32_t dma_stream_get_write_off(struct pcie40_dma_stream *stream)
{
  // See regmap`pcie.dma_daq_stream.host_buf_write_off`
  //TODO: spin lock to serialize access!
  return (stream->write_off = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_BUF_WRITE_OFF));
}
//+`dma_stream_configure`
// Set up one DMA stream: create its character device, allocate the host
// circular buffer (real DMA memory, or plain kernel memory under
// PCIE40_EMU), publish the map to the FPGA and restore the stream's
// previous enable state.  Returns 0 on success, -1 on failure.
static int dma_stream_configure(int dev_id, struct pcie40_dma_stream *stream, size_t map_base, size_t map_max_entries, size_t buf_desired_bytes)//;?>
{
  int rc;
  uint32_t enable;
  stream->msi_count = 0;
  spin_lock_init(&stream->off_lock);
  // Remember the enable state and disable the stream while the host map is
  // being (re)built.
  enable = pcie40_read32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE);
  pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE, 0);
  rc = pcie40_setup_cdev(pcie40_daq_class, &stream->cdev, stream->state->dev_num, stream->cdev_minor, 1, stream->cdev_name, dev_id, &dma_file_ops);
  if (rc < 0) {
    goto err_setup_cdev;
  }
  // NOTE(review): printk format lacks a trailing '\n' and uses %lu for a
  // size_t (should be %zu) -- confirm and fix separately.
  printk(P40_INFO "allocating memory (%lu bytes)", P40_PARM, buf_desired_bytes);
#ifndef PCIE40_EMU
  if (dma_map_alloc(stream->state->common->pci_dev, &stream->map, stream->state->common->bar1_regs + map_base, map_max_entries, buf_desired_bytes) < 0) {
    goto err_map_alloc;
  }
#else
  if (dma_map_emu_alloc(&stream->map, stream->state->common->bar1_regs + map_base, map_max_entries, buf_desired_bytes) < 0) {
    goto err_map_alloc;
  }
#endif
  pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_MAP_ENTRIES, stream->map.num_entries);
  pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_ENABLE, enable);
  dma_map_print(&stream->map);
  return 0;
  // NOTE(review): both error labels share one cleanup that calls
  // device_destroy() -- on the err_setup_cdev path the device may never
  // have been created, and on err_map_alloc the cdev itself is not deleted.
  // Confirm against pcie40_setup_cdev()'s cleanup contract.
err_setup_cdev:
err_map_alloc:
  printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, dev_id, stream->cdev_name);
  device_destroy(pcie40_daq_class, MKDEV(MAJOR(stream->state->dev_num), MINOR(stream->state->dev_num)+stream->cdev_minor));
  return -1;
}
//+`dma_stream_destroy`
// Tear down one DMA stream: remove its device node, free the host circular
// buffer and tell the FPGA the host map is gone.
static void dma_stream_destroy(int dev_id, struct pcie40_dma_stream *stream)//;?>
{
  printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, dev_id, stream->cdev_name);
  device_destroy(pcie40_daq_class,
  MKDEV(MAJOR(stream->state->dev_num),
  MINOR(stream->state->dev_num)+stream->cdev_minor));
  printk(P40_INFO "free DMA buffer(s)\n", P40_PARM);
#ifndef PCIE40_EMU
  dma_map_free(stream->state->common->pci_dev, &stream->map);
#else
  dma_map_emu_free(&stream->map);
#endif
  // Clear the entry count on the FPGA so it stops using the freed buffers.
  pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_MAP_ENTRIES, 0);
}
//+`pcie40_daq_init`
// Module-init helper: create the device class under which all DAQ character
// devices are registered.  Returns 0 on success or the class_create() error.
int pcie40_daq_init(void)//;?>
{
  int rc = 0;
  pcie40_daq_class = class_create(THIS_MODULE, PCIE40_DAQ_CLASS);
  if (IS_ERR(pcie40_daq_class)) {
    rc = PTR_ERR(pcie40_daq_class);
    printk(P40_WARN "failed to register class, %d\n", P40_PARM, rc);
    goto err_class_create;
  }
  //pcie40_daq_class->dev_uevent = pcie40_dev_uevent;
  pcie40_daq_class->devnode = pcie40_devnode; // controls /dev node permissions
err_class_create:
  return rc;
}
//+`pcie40_daq_exit`
// Module-exit helper: destroy the class created by pcie40_daq_init().
void pcie40_daq_exit(void)//;?>
{
  class_destroy(pcie40_daq_class);
}
#endif//__PCIE40_DRIVER_DAQ_H
//p40emu``+
// Emulation translation unit: defines PCIE40_EMU *before* including daq.h so
// that the driver above is compiled against emulated (plain-memory) register
// and buffer accessors instead of real PCI DMA.
#define P40_FMT "P40DAQemu:%s(): "
#define PCIE40_DAQ_CLASS "lhcb_pcie40_daq_emu"
#define PCIE40_EMU
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/types.h>
// Forward declarations needed by daq.h when PCIE40_EMU is set.
struct pcie40_dma_map;
static int dma_map_emu_alloc(struct pcie40_dma_map* map, void *base, size_t max_entries, size_t desired_size);
static int dma_map_emu_free(struct pcie40_dma_map* map);
#include "daq.h"
extern int mpf; // emulated MSIs-per-flush, defined elsewhere in the module
// Emulated replacement for dma_map_alloc(): build the host "DMA" buffers out
// of ordinary kernel memory.  For each entry it tries progressively smaller
// allocations (halving on failure) and gives up below 1 MiB; on failure all
// partial allocations are released.  Returns 0 on success, -1 on failure.
// Fixes: the entry-table kzalloc() is now checked before use, and
// virt_to_phys() is no longer evaluated on a potentially NULL pointer.
static int dma_map_emu_alloc(struct pcie40_dma_map* map, void *base, size_t max_entries, size_t desired_size)
{
  int i;
  map->base = base;
  map->max_entries = max_entries;
  map->num_entries = 0;
  map->size = 0;
  map->entries = kzalloc(sizeof(struct pcie40_dma_buffer)*max_entries, GFP_KERNEL);
  if (map->entries == NULL) {
    printk(P40_ERR "cannot allocate entry table\n", P40_PARM);
    return -1;
  }
  printk(P40_DIAG "entries = 0x%p\n", P40_PARM, map->entries);
  for (i = 0; i < map->max_entries; ++i) {
    // Cap each chunk at the allocator limit; the last chunk may be smaller.
    size_t alloc_size = desired_size - map->size > PCI_MAX_ALLOC
      ? PCI_MAX_ALLOC
      : desired_size - map->size;
    while (1) {
      struct pcie40_dma_buffer* buffer = map->entries + i;
      buffer->size = alloc_size;
      buffer->ptr = kzalloc(buffer->size, GFP_KERNEL);
      if (buffer->ptr == NULL) {
        if (buffer->size < 1024*1024) {
          // Even a 1 MiB chunk failed: release everything allocated so far.
          printk("...NOT ENOUGH MEMORY!\n");
          map->num_entries = i;
          dma_map_emu_free(map);
          return -1;
        }
        alloc_size /= 2; // retry with a smaller chunk
      } else {
        buffer->start = virt_to_phys(buffer->ptr);
        map->size += buffer->size;
        break;
      }
    }
    if (map->size >= desired_size) {
      ++i; // count the entry just filled
      break;
    }
  }
  map->num_entries = i;
  return 0;
}
/* Release all buffers of an emulated DMA map.
 * Now idempotent: a NULL entries array is tolerated and the counters are
 * reset, so a second call (e.g. from the alloc error path followed by a
 * regular teardown) cannot iterate over freed/NULL entries. */
static int dma_map_emu_free(struct pcie40_dma_map* map)
{
	int i;
	if (map->entries == NULL) {
		return 0; /* nothing allocated, or already freed */
	}
	for (i = 0; i < map->num_entries; ++i) {
		struct pcie40_dma_buffer* buffer = map->entries + i;
		kfree(buffer->ptr); /* kfree(NULL) is a no-op for partially filled maps */
	}
	kfree(map->entries);
	map->entries = NULL;
	map->num_entries = 0;
	map->size = 0;
	return 0;
}
/* Update the stream's cached write offset and mirror it into the emulated
 * HOST_BUF_WRITE_OFF register, as the FPGA would after a DMA transfer. */
static void dma_stream_set_write_off(struct pcie40_dma_stream *stream, uint32_t write_off)
{
	//TODO: spin lock to serialize access!
	stream->write_off = write_off;
	pcie40_write32_stream(stream, P40_DMA_DAQ_STREAM_OFF_HOST_BUF_WRITE_OFF, write_off);
}
//+`dma_stream_emu_write` Emulate a dma write into a given stream.
static ssize_t dma_stream_emu_write(
	struct pcie40_dma_stream *stream, int *map_idx, uint32_t *buf_off, const void *from, size_t bytes)//;?>
//>`stream` Stream to write to.
//><`map_idx` Index of current DMA buffer, must be initialized to 0.
//><`buf_off` Offset within current DMA buffer, must be initialized to 0.
//>`from` Pointer to data to write, if NULL, zeroes will be written instead.
//>`bytes` Number of bytes to write.
{
	uint32_t write_off, write_off_next;
	ssize_t bytes_written = 0;
	//? The write is started only if sufficient space is available.
	size_t bytes_free = dma_stream_get_bytes_free(stream);
	if (bytes_free <= bytes) {
		//? If not, a negative value is immediately returned. Its magnitude is the number of bytes missing.
		// NOTE(review): when bytes_free == bytes this returns 0, which callers
		// treat neither as success (>0) nor as "buffer full" (<0) — the strict
		// "<=" apparently keeps the ring from becoming completely full, but the
		// zero return value is ambiguous; confirm against the callers' checks.
		return bytes_free - bytes;
	}
	// if (write_off + 32 == (read_off & ~0x1F))
	//? A write is split across multiple DMA buffers if the current one does not have sufficient space available.
	while (bytes_written < bytes) {
		struct pcie40_dma_buffer *buf = stream->map.entries + *map_idx;
		size_t bytes_left = bytes - bytes_written;
		if (*buf_off + bytes_left <= buf->size) {
			// Remainder fits in the current buffer: copy (or zero-fill) and stop.
			if (from) {
				memcpy(buf->ptr + *buf_off, from + bytes_written, bytes_left);
			} else {
				memset(buf->ptr + *buf_off, 0, bytes_left);
			}
			bytes_written += bytes_left;
			*buf_off += bytes_left;
		} else {
			// Fill the current buffer to the end, then wrap to the next map entry.
			size_t bytes_avail = buf->size - *buf_off;
			if (from) {
				memcpy(buf->ptr + *buf_off, from + bytes_written, bytes_avail);
			} else {
				memset(buf->ptr + *buf_off, 0, bytes_avail);
			}
			*map_idx = (*map_idx + 1) % stream->map.num_entries;
			bytes_written += bytes_avail;
			*buf_off = 0;
		}
	}
	// Publish the new write offset atomically w.r.t. readers of the offset.
	spin_lock_irqsave(&stream->off_lock, stream->off_flags);
	write_off = dma_stream_get_write_off(stream);
	write_off_next = (write_off + bytes) % stream->map.size;
	dma_stream_set_write_off(stream, write_off_next);
	spin_unlock_irqrestore(&stream->off_lock, stream->off_flags);
	return bytes;
}
//+`daq_emu_thread` Thread loop generating emulated board data.
//>`data` Opaque pointer to interface state
static int daq_emu_thread(void *data) //?>
{
	/* Emulator main loop: pretends to be the PCIe40 FPGA DMA engine.
	 * Reads the emulated control registers each cycle and, depending on
	 * their state, generates fixed patterns or event fragments into the
	 * main stream (and, optionally, metadata blocks into the meta stream).
	 *
	 * Fix vs. previous version: the "reset default" branch wrote the
	 * default fixed pattern into P40_DMA_CTRL_OFF_MAIN_GEN_CTL (clobbering
	 * the control register it had just programmed) instead of
	 * P40_DMA_CTRL_OFF_MAIN_GEN_FIXED, which is where the pattern is read
	 * back from every cycle. */
	struct pcie40_daq_state *state = data;
	uint64_t evid_frg = 0;
	int main_buf = 0;
	uint32_t main_off = 0;
	int meta_buf = 0;
	uint32_t meta_off = 0;
	int meta_idx = 0;
	ssize_t write_status = 0;
	enum { GEN_MHDR, GEN_FRG, GEN_META, GEN_MPAD } gen_state = GEN_MHDR;
	//? One instance of this thread is spawned for each emulated interface.
	printk(P40_INFO "starting emulator thread for interface %d\n",
		P40_PARM, state->common->dev_id);
	pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_META_PACKING, mpf);
	//? The thread loops until the module is unloaded. <?
	while (!kthread_should_stop()) { //... } ?>
		uint32_t mmr_reset, mmr_main_gen_ctl, mmr_main_gen_fixed, mmr_main_raw_mode, mmr_main_enable, mmr_meta_enable;
		struct pcie40_dma_buffer *main_buffer = state->main_stream.map.entries + main_buf;
		struct pcie40_dma_buffer *meta_buffer = state->meta_stream.map.entries + meta_buf;
		//? Every loop iteration reads the emulated control registers <?
		mmr_reset = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_RESET);
		mmr_main_gen_ctl = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_CTL);
		mmr_main_gen_fixed = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_FIXED);
		mmr_main_raw_mode = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_RAW_MODE);
		mmr_main_enable = pcie40_read32_stream(&state->main_stream, P40_DMA_DAQ_STREAM_OFF_ENABLE);
		mmr_meta_enable = pcie40_read32_stream(&state->meta_stream, P40_DMA_DAQ_STREAM_OFF_ENABLE);
		mpf = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_META_PACKING);
		//?>Then uses these values to drive its behaviour for the rest of the current emulation cycle.
		if (mmr_reset & (1 << P40_RST_BIT_DEFAULT)) {
			// Full reset: disable the generator, restore the default fixed
			// pattern, enable main and disable meta, then cascade into a
			// logic reset below.
			printk(P40_INFO "%d: reset default\n", P40_PARM, state->common->dev_id);
			mmr_main_gen_ctl &= ~(1 << P40_MAIN_GEN_BIT_ENABLE);
			mmr_main_gen_ctl &= ~(1 << P40_MAIN_GEN_BIT_FIXED);
			pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_CTL, mmr_main_gen_ctl);
			mmr_main_gen_fixed = 0x6243484C;
			// BUGFIX: the default pattern belongs in the GEN_FIXED register,
			// not in GEN_CTL (which was just programmed above).
			pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_FIXED, mmr_main_gen_fixed);
			mmr_main_enable = 1;
			pcie40_write32_stream(&state->main_stream, P40_DMA_DAQ_STREAM_OFF_ENABLE, mmr_main_enable);
			mmr_meta_enable = 0;
			pcie40_write32_stream(&state->meta_stream, P40_DMA_DAQ_STREAM_OFF_ENABLE, mmr_meta_enable);
			mmr_reset |= (1 << P40_RST_BIT_LOGIC);
		}
		if (mmr_reset & (1 << P40_RST_BIT_LOGIC)) {
			// Logic reset: stop the generator and rewind both ring buffers.
			printk(P40_INFO "%d: reset logic\n", P40_PARM, state->common->dev_id);
			mmr_main_gen_ctl &= ~(1 << P40_MAIN_GEN_BIT_RUNNING);
			pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_GEN_CTL, mmr_main_gen_ctl);
			dma_stream_set_read_off(&state->main_stream, 0);
			dma_stream_set_write_off(&state->main_stream, 0);
			dma_stream_set_read_off(&state->meta_stream, 0);
			dma_stream_set_write_off(&state->meta_stream, 0);
			gen_state = GEN_MHDR;
		}
		if (mmr_reset & (1 << P40_RST_BIT_FLUSH)) {
			printk(P40_INFO "%d: reset flush\n", P40_PARM, state->common->dev_id);
		}
		pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_RESET, 0);
retry:
		//Jump to this point to retry the last operation without re-reading the registers
		// (useful in the middle of flushing, once the reset register has been zeroed but we're not done flushing yet)
		if (!(mmr_main_gen_ctl & (1 << P40_MAIN_GEN_BIT_ENABLE))) {
			// Generator disabled: hold all counters at their initial state.
			evid_frg = 0;
			main_buf = 0;
			main_off = 0;
			meta_buf = 0;
			meta_off = 0;
			meta_idx = 0;
			gen_state = GEN_MHDR;
		}
		if ((mmr_main_enable == 1) && (mmr_main_gen_ctl & (1 << P40_MAIN_GEN_BIT_ENABLE))) {
			struct __attribute__((__packed__)) {
				uint64_t evid;
				uint32_t ghdr;
				uint8_t data[128 - sizeof(uint64_t) - sizeof(uint32_t)]; //Must be aligned to 32 bytes
			} frg_nometa;
			struct __attribute__((__packed__)) {
				char magic[6];
				uint16_t frags;
				uint64_t evid;
				uint64_t offset;
			} meta_hdr;
			struct __attribute__((__packed__)) {
				uint32_t ghdr;
				uint8_t data[104 - sizeof(uint32_t)]; //Must be aligned to 8 bytes
			} frg;
			uint16_t meta;
			size_t meta_size = sizeof(meta_hdr) + mpf * sizeof(meta);
			size_t meta_left = (mpf - meta_idx) * sizeof(meta);
			// NOTE(review): this computes the 8-byte-aligned-down size, not the
			// number of pad bytes (which would be 8 - meta_size % 8); confirm
			// against the consumer of the meta stream before changing.
			size_t meta_pad = meta_size % 8 ? (meta_size - meta_size % 8) : 0; //Metadata is also aligned to 8 bytes
			if (mmr_reset & (1 << P40_RST_BIT_FLUSH)) {
				if (mmr_meta_enable) {
					switch (gen_state) {
					case GEN_MHDR:
						// Just generate a dummy header with an evid of 0xFF...
						evid_frg = 0;
						meta_idx = 0;
						memcpy(meta_hdr.magic, "META40", sizeof(meta_hdr.magic));
						meta_hdr.evid = 0xFFFFFFFFFFFFFFFF;
						meta_hdr.offset = main_off;
						write_status = dma_stream_emu_write(&state->meta_stream,
							&meta_buf, &meta_off, &meta_hdr, sizeof(meta_hdr));
						if (write_status < 0) {
							msleep_interruptible(1);
							goto retry;
						}
						break;
					case GEN_FRG:
						//Before the flush we were supposed to emit a fragment, now we aren't anymore
						//just fallthrough to the next case
					case GEN_META:
						//Also nothing to do here, no new fragments to create metadata for
						//just fallthrough to the next case
					case GEN_MPAD:
						write_status = dma_stream_emu_write(&state->meta_stream,
							&meta_buf, &meta_off, NULL, meta_left + meta_pad);
						// NOTE(review): retrying on success (>= 0) differs from the
						// GEN_MHDR case above, which retries on failure; left as-is
						// pending confirmation of the intended flush semantics.
						if (write_status >= 0) {
							msleep_interruptible(1);
							goto retry;
						}
						break;
					}
				} else {
					// No meta stream: flush by emitting a terminator fragment.
					frg_nometa.evid = 0xFFFFFFFFFFFFFFFF;
					frg_nometa.ghdr = ((100 * 8) & 0xFFFFF) | ((evid_frg & 0xFFF) << 20); //Make them 100 bytes
					memset(frg_nometa.data, 0, sizeof(frg_nometa.data));
					write_status = dma_stream_emu_write(&state->main_stream,
						&main_buf, &main_off, &frg_nometa, sizeof(frg_nometa));
					if (write_status > 0) {
						evid_frg = 0;
					}
				}
			} else //Not flushing
			if (mmr_main_gen_ctl & (1 << P40_MAIN_GEN_BIT_RUNNING)) {
				if (mmr_main_gen_ctl & (1 << P40_MAIN_GEN_BIT_FIXED)) {
					// Generate fixed pattern
					uint32_t pattern[4];
					mmr_main_raw_mode = 1;
					pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_RAW_MODE, mmr_main_raw_mode);
					//Because the FPGA writes payloads in BE
					pattern[0] = pattern[1] = pattern[2] = pattern[3] = cpu_to_be32(mmr_main_gen_fixed);
					write_status = dma_stream_emu_write(&state->main_stream,
						&main_buf, &main_off, &pattern, sizeof(pattern));
				} else {
					// Generate fragments
					mmr_main_raw_mode = 0;
					pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_MAIN_RAW_MODE, mmr_main_raw_mode);
					// If the metadata stream is enabled, use it for metadata
					if (mmr_meta_enable) {
						// State machine: header -> (fragment -> meta entry) x mpf -> padding.
						switch (gen_state) {
						case GEN_MHDR:
							meta_idx = 0;
							memcpy(meta_hdr.magic, "META40", sizeof(meta_hdr.magic));
							meta_hdr.frags = mpf;
							meta_hdr.evid = evid_frg;
							meta_hdr.offset = main_off;
							write_status = dma_stream_emu_write(&state->meta_stream,
								&meta_buf, &meta_off, &meta_hdr, sizeof(meta_hdr));
							if (write_status > 0) {
								gen_state = GEN_FRG;
							}
							break;
						case GEN_FRG:
							frg.ghdr = ((100 * 8) & 0xFFFFF) | ((evid_frg & 0xFFF) << 20); //Make them 100 bytes
							memset(frg.data, (uint8_t)evid_frg, sizeof(frg.data));
							write_status = dma_stream_emu_write(&state->main_stream,
								&main_buf, &main_off, &frg, sizeof(frg));
							if (write_status > 0) {
								++meta_idx;
								gen_state = GEN_META;
							}
							break;
						case GEN_META:
							meta = (frg.ghdr & 0xFFFFF) / 8; //Turn into bytes and pad to 8
							if (meta % 8) meta += 8 - meta % 8;
							write_status = dma_stream_emu_write(&state->meta_stream,
								&meta_buf, &meta_off, &meta, sizeof(meta));
							if (write_status > 0) {
								if (meta_idx < mpf) {
									gen_state = GEN_FRG;
								} else {
									gen_state = GEN_MPAD;
								}
							}
							break;
						case GEN_MPAD:
							write_status = dma_stream_emu_write(&state->meta_stream,
								&meta_buf, &meta_off, NULL, meta_pad);
							if (write_status >= 0) {
								gen_state = GEN_MHDR;
							}
							break;
						}
					} else {
						// Otherwise generate only fragments
						frg_nometa.evid = evid_frg;
						frg_nometa.ghdr = ((100 * 8) & 0xFFFFF) | ((evid_frg & 0xFFF) << 20); //Make them 100 bytes
						memset(frg_nometa.data, (uint8_t)evid_frg, sizeof(frg_nometa.data));
						write_status = dma_stream_emu_write(&state->main_stream,
							&main_buf, &main_off, &frg_nometa, sizeof(frg_nometa));
						if (write_status > 0) {
							++evid_frg;
						}
					}
				}
			}
		}
		//XXX: Apparently we soft-lockup the kernel if we do not yield at least from time to time?
		if (write_status < 0) {
			msleep_interruptible(1);
		} else {
			yield();
		}
	}
	printk(P40_INFO "stopping emulator thread for interface %d\n",
		P40_PARM, state->common->dev_id);
	return 0;
}
int pcie40_daq_emu_probe(struct pcie40_state *common)
{
int rc = 0;
struct pcie40_daq_state *state = NULL;
uint32_t regmap_version, fpga_version;
state = kzalloc(sizeof(struct pcie40_daq_state), GFP_KERNEL);
if (IS_ERR(state)) {
printk(P40_ERR "kzalloc()\n", P40_PARM);
rc = PTR_ERR(state);
goto err_kzalloc;
}
state->common = common;
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
init_waitqueue_head(&state->wait);
rc = alloc_chrdev_region(&(state->dev_num), P40_DAQ_CDEV_BASEMINOR, P40_DAQ_CDEV_COUNT, P40_DRV_NAME);
if (rc < 0) {
printk(P40_ERR "alloc_chrdev_region()\n", P40_PARM);
goto err_alloc_chrdev_region;
}
regmap_version = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_REGMAP);
printk(P40_INFO "Register map version: 0x%08X\n", P40_PARM, regmap_version);
fpga_version = pcie40_read32_ctrl(state->common, P40_DMA_CTRL_OFF_VERSION);
printk(P40_INFO "FPGA core version: %X.%02X (%04X)\n", P40_PARM,
fpga_version >> 24, (fpga_version >> 16) & 0xFF, fpga_version & 0xFFFF);
// CTRL endpoint
rc = pcie40_setup_cdev(pcie40_daq_class, &(state->ctrl_cdev), state->dev_num, CTRL_CDEV_MINOR, 1, CTRL_CDEV_NAME, state->common->dev_id, &ctrl_file_ops);
if (rc < 0) {
goto err_dev_ctrl;
}
// TODO: the streams should be configured only when the corresponding device is opened (so, if we're not using metadata or odin, that memory will not be allocated at all)
if (mainmibs < 0 || mainmibs > MAIN_BUF_MIBS_MAX) {
mainmibs = MAIN_BUF_MIBS_MAX;
}
// MAIN stream
state->main_stream.cdev_name = MAIN_CDEV_NAME;
state->main_stream.cdev_minor = MAIN_CDEV_MINOR;
state->main_stream.regs_base = P40_DMA_DAQ_MAIN_STREAM_QSYS_BASE;
state->main_stream.state = state;
rc = dma_stream_configure(state->common->dev_id, &state->main_stream,
P40_DMA_DAQ_MAIN_MAP_QSYS_BASE, MAIN_MAP_MAX_ENTRIES, mainmibs * 1024LL*1024LL);
if (rc < 0) {
goto err_main_configure;
}
if (metamibs < 0 || metamibs > META_BUF_MIBS_MAX) {
metamibs = META_BUF_MIBS_MAX;
}
// META stream
state->meta_stream.cdev_name = META_CDEV_NAME;
state->meta_stream.cdev_minor = META_CDEV_MINOR;
state->meta_stream.regs_base = P40_DMA_DAQ_META_STREAM_QSYS_BASE;
state->meta_stream.state = state;
rc = dma_stream_configure(state->common->dev_id, &state->meta_stream,
P40_DMA_DAQ_META_MAP_QSYS_BASE, META_MAP_MAX_ENTRIES, metamibs * 1024LL*1024LL);
if (rc < 0) {
goto err_meta_configure;
}
// Start in reset mode (the bit auto clears)
pcie40_write32_ctrl(state->common, P40_DMA_CTRL_OFF_RESET, 1 << P40_RST_BIT_DEFAULT);
state->emu_thread = kthread_run(daq_emu_thread, state, "P40DAQemu%d", common->dev_id);
common->daq_state = state;
return rc;
//dma_stream_destroy(state->common->dev_id, &state->meta_stream);
err_meta_configure:
dma_stream_destroy(state->common->dev_id, &state->main_stream);
err_main_configure:
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, CTRL_CDEV_NAME);
device_destroy(pcie40_daq_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+CTRL_CDEV_MINOR));
err_dev_ctrl:
unregister_chrdev_region(state->dev_num, P40_DAQ_CDEV_COUNT);
err_alloc_chrdev_region:
kfree(state);
err_kzalloc:
return rc;
}
/* Teardown for an emulated DAQ interface: stop the emulator thread, destroy
 * both streams and the ctrl device, release the char-device region and free
 * the per-device state. Safe to call when probe never set daq_state. */
void pcie40_daq_emu_remove(struct pcie40_state *common)
{
	struct pcie40_daq_state *state = common->daq_state;
	printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
	if (!state) {
		printk(P40_ERR "no state\n", P40_PARM);
		return;
	}
	if (state->emu_thread) {
		// Blocks until daq_emu_thread() returns.
		kthread_stop(state->emu_thread);
	}
	dma_stream_destroy(state->common->dev_id, &state->meta_stream);
	dma_stream_destroy(state->common->dev_id, &state->main_stream);
	printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, CTRL_CDEV_NAME);
	device_destroy(pcie40_daq_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+CTRL_CDEV_MINOR));
	unregister_chrdev_region(state->dev_num, P40_DAQ_CDEV_COUNT);
	kfree(state);
}
//p40fpga``+
#define P40_FMT "P40ECS:%s(): "
#define PCIE40_ECS_CLASS "lhcb_pcie40_ecs"
#include "ecs.h"
#include "pcie40_ioctl.h"
//dg`wt.pcie40.fpga.ecs` The ECS submodule implements the system calls behind the BAR0 and BAR2 device files. The submodule is initialized and uninitialized at the same time as the main module, through the p40driver`pcie40_ecs_init` and p40driver`pcie40_ecs_exit` functions. Likewise the ECS-specific probing logic is encapsulated in p40fpga`pcie40_ecs_probe` and p40fpga`pcie40_ecs_remove` .
static void pcie40_ecs_set_drvdata(struct pci_dev *pdev, struct pcie40_ecs_state *state)
{
struct pcie40_state *common = pci_get_drvdata(pdev);
common->ecs_state = state;
common->ecs_state->common = common;
}
/* Retrieve the ECS state previously attached via pcie40_ecs_set_drvdata(). */
static struct pcie40_ecs_state *pcie40_ecs_get_drvdata(struct pci_dev *pdev)
{
	struct pcie40_state *common;
	common = pci_get_drvdata(pdev);
	return common->ecs_state;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
static int ecs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#else
static long ecs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#endif//+`ecs_ioctl`
{
	/* ioctl handler for the BAR0/BAR2 device files. The only supported
	 * command is P40_ECS_GET_BAR_SIZE, which returns the size in bytes of
	 * the BAR selected by the device minor number. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	struct inode *inode = filp->f_inode;
#endif
	struct pcie40_ecs_state *state = filp->private_data;
	int bar;
	switch (iminor(inode)) {
	case BAR0_CDEV_MINOR:
		bar = 0;
		break;
	case BAR2_CDEV_MINOR:
		bar = 2;
		break;
	default:
		return -EINVAL;
	}
	//ioctl.pcie`P40_ECS_GET_BAR_SIZE`
	if (cmd != P40_ECS_GET_BAR_SIZE) {
		printk(P40_DIAG "invalid ioctl command\n", P40_PARM);
		return -EINVAL;
	}
	printk(P40_INFO "ECS BAR size is %lu\n", P40_PARM, state->common->bar_size[bar]);
	return state->common->bar_size[bar];
}
//+`ecs_mmap`
static int ecs_mmap(struct file* filp, struct vm_area_struct* vma)//;?>
{
	/* Map the physical PCI BAR (0 or 2, chosen by device minor) into the
	 * caller's address space as uncached I/O memory. */
	int rc = 0;
	struct pcie40_ecs_state *state = filp->private_data;
	int bar;
	switch (iminor(filp->f_path.dentry->d_inode)) {
	case BAR0_CDEV_MINOR:
		bar = 0;
		break;
	case BAR2_CDEV_MINOR:
		bar = 2;
		break;
	default:
		return -EINVAL;
	}
	//vma->vm_flags |= VM_IO | VM_RESERVED; //VM_DONTEXPAND | VM_DONTDUMP; for 3.11
	// Register access must not be cached by the CPU.
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	printk(P40_INFO "VIRT=%lx PHYS=%lx SIZE=%lu\n", P40_PARM,
		vma->vm_start, state->common->bar_start[bar], vma->vm_end - vma->vm_start);
	//this needs a more recent kernel than what we have now
	//rc = vm_iomap_memory(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	rc = io_remap_pfn_range(vma, vma->vm_start,
		state->common->bar_start[bar] >> PAGE_SHIFT, vma->vm_end - vma->vm_start,
		vma->vm_page_prot);
	if (rc) {
		printk(P40_DIAG "io_remap_pfn_range()\n", P40_PARM);
		return rc;
	}
	//vma->vm_ops = &bar_vm_ops;
	return 0;
}
/* File operations for the real (non-emulated) ECS BAR0/BAR2 device files. */
static struct file_operations ecs_file_ops = {
	.owner = THIS_MODULE,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
	.ioctl = ecs_ioctl, /* legacy ioctl signature on pre-2.6.35 kernels */
#else
	.unlocked_ioctl = ecs_ioctl,
#endif
	.mmap = ecs_mmap,
	.open = ecs_open,
	.release = ecs_release,
};
//+`pcie40_ecs_probe` Initialize ECS BARs and create device files.
int pcie40_ecs_probe(struct pci_dev *dev, const struct pci_device_id *id)//;?>
{
int rc = 0;
struct pcie40_state *common;
struct pcie40_ecs_state *state = NULL;
common = pci_get_drvdata(dev);
//? This function allocates a p40driver`pcie40_ecs_state` instance to keep ECS-specific state.
state = kzalloc(sizeof(struct pcie40_ecs_state), GFP_KERNEL);
if (IS_ERR(state)) {
printk(P40_ERR "kzalloc()\n", P40_PARM);
rc = PTR_ERR(state);
goto err_kzalloc;
}
state->common = common;
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
//? It then requests exclusive access to the ECS BARs
rc = pci_request_selected_regions_exclusive(dev, P40_ECS_BARS_MASK, P40_DRV_NAME);
if (rc) {
printk(P40_WARN "unable to reserve ECS regions\n", P40_PARM);
goto err_pci_request_regions_ecs;
}
//? and allocates a range of minor numbers for its character devices.
rc = alloc_chrdev_region(&(state->dev_num), P40_ECS_CDEV_BASEMINOR, P40_ECS_CDEV_COUNT, P40_DRV_NAME);
if (rc < 0) {
printk(P40_ERR "alloc_chrdev_region()\n", P40_PARM);
goto err_alloc_chrdev_region;
}
//? One such device is created for BAR0
if (state->common->bar_size[0]) {
rc = pcie40_setup_cdev(pcie40_ecs_class, &(state->bar0_cdev), state->dev_num, BAR0_CDEV_MINOR, 0, BAR0_CDEV_NAME, state->common->dev_id, &ecs_file_ops);
if (rc < 0) {
goto err_bar0_dev;
}
}
//? and a second one for BAR2.
if (state->common->bar_size[2]) {
rc = pcie40_setup_cdev(pcie40_ecs_class, &(state->bar2_cdev), state->dev_num, BAR2_CDEV_MINOR, 2, BAR2_CDEV_NAME, state->common->dev_id, &ecs_file_ops);
if (rc < 0) {
goto err_bar2_dev;
}
}
pcie40_ecs_set_drvdata(dev, state);
return rc;
err_bar2_dev:
if (state->common->bar_size[0]) {
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR0_CDEV_NAME);
device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR0_CDEV_MINOR));
}
err_bar0_dev:
unregister_chrdev_region(state->dev_num, P40_ECS_CDEV_COUNT);
err_alloc_chrdev_region:
pci_release_selected_regions(dev, P40_ECS_BARS_MASK);
err_pci_request_regions_ecs:
kfree(state);
err_kzalloc:
return rc;
}
//+`pcie40_ecs_remove` Destroy ECS device files.
void pcie40_ecs_remove(struct pci_dev *dev)//;?>
{
	/* Undo pcie40_ecs_probe(): destroy the bar2/bar0 device files (in
	 * reverse creation order), release the minor range and the reserved
	 * PCI regions, then free the ECS state. */
	struct pcie40_ecs_state *state;
	printk(P40_DIAG "pci_dev = 0x%p\n", P40_PARM, dev);
	state = pcie40_ecs_get_drvdata(dev);
	if (!dev || !state) {
		printk(P40_DIAG "remove(dev = 0x%p) dev->driver_data = 0x%p\n", P40_PARM, dev, state);
		return;
	}
	if (state->common->bar_size[2]) {
		printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR2_CDEV_NAME);
		device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR2_CDEV_MINOR));
	}
	if (state->common->bar_size[0]) {
		printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR0_CDEV_NAME);
		device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR0_CDEV_MINOR));
	}
	unregister_chrdev_region(state->dev_num, P40_ECS_CDEV_COUNT);
	printk(P40_INFO "releasing PCI regions\n", P40_PARM);
	pci_release_selected_regions(dev, P40_ECS_BARS_MASK);
	kfree(state);
}
#ifndef __PCIE40_DRIVER_ECS_H
#define __PCIE40_DRIVER_ECS_H
//p40driver``+
#include "common.h"
#include <linux/fs.h>
#define P40_ECS_CDEV_BASEMINOR (0)
#define P40_ECS_CDEV_COUNT (2)
#define P40_ECS_BARS_MASK ((1<<0)|(1<<2)) //BAR0, BAR2
//ug`pcie40_driver.files`bar0 _ /dev/pcie40_?_bar0::
// Device to access user registers on the FPGA.
#define BAR0_CDEV_MINOR (P40_ECS_CDEV_BASEMINOR + 0)
static const char BAR0_CDEV_NAME[] = "bar0";
//ug`pcie40_driver.files`bar2 _ /dev/pcie40_?_bar2::
// Device to access low-level registers on the FPGA.
#define BAR2_CDEV_MINOR (P40_ECS_CDEV_BASEMINOR + 1)
static const char BAR2_CDEV_NAME[] = "bar2";
//+`pcie40_ecs_state`
/* Per-interface state for the ECS (slow control) side of the driver. */
struct pcie40_ecs_state {
	struct pcie40_state *common; //back-pointer to the shared per-device state
	dev_t dev_num; //base MAJOR/MINOR numbers for device files
	struct cdev bar0_cdev; //char device exposing BAR0 (user registers)
	struct cdev bar2_cdev; //char device exposing BAR2 (low-level registers)
};
//+`ecs_open`
/* open() handler: resolve the enclosing ECS state from the cdev embedded in
 * the inode and stash it in private_data for the other file operations. */
static int ecs_open(struct inode *inode, struct file *filp)//;?>
{
	struct pcie40_ecs_state *state;
	unsigned int minor = iminor(inode);
	if (minor == BAR0_CDEV_MINOR) {
		printk(P40_INFO "BAR0", P40_PARM);
		state = container_of(inode->i_cdev, struct pcie40_ecs_state, bar0_cdev);
	} else if (minor == BAR2_CDEV_MINOR) {
		printk(P40_INFO "BAR2", P40_PARM);
		state = container_of(inode->i_cdev, struct pcie40_ecs_state, bar2_cdev);
	} else {
		printk(P40_INFO "invalid BAR", P40_PARM);
		return -EINVAL;
	}
	filp->private_data = state;
	return 0;
}
//+`ecs_release`
/* release() handler: re-derive the ECS state from the inode, sanity-check it
 * against what open() stored, then clear private_data. */
static int ecs_release(struct inode *inode, struct file *filp)//;?>
{
	struct pcie40_ecs_state *state;
	unsigned int minor = iminor(inode);
	if (minor == BAR0_CDEV_MINOR) {
		printk(P40_INFO "BAR0", P40_PARM);
		state = container_of(inode->i_cdev, struct pcie40_ecs_state, bar0_cdev);
	} else if (minor == BAR2_CDEV_MINOR) {
		printk(P40_INFO "BAR2", P40_PARM);
		state = container_of(inode->i_cdev, struct pcie40_ecs_state, bar2_cdev);
	} else {
		printk(P40_INFO "invalid BAR", P40_PARM);
		return -EINVAL;
	}
	if (filp->private_data != state) {
		printk(P40_ERR "inconsistent private_data\n", P40_PARM);
		return -EINVAL;
	}
	filp->private_data = NULL;
	return 0;
}
static struct class *pcie40_ecs_class = NULL;
//+`pcie40_ecs_init` Register ECS device class.
int pcie40_ecs_init(void)//;?>
{
	/* Register the dedicated device class used to create ECS device files.
	 * Returns 0 on success or the negative error from class_create(). */
	int rc = 0;
	pcie40_ecs_class = class_create(THIS_MODULE, PCIE40_ECS_CLASS);
	if (!IS_ERR(pcie40_ecs_class)) {
		//pcie40_ecs_class->dev_uevent = pcie40_dev_uevent;
		pcie40_ecs_class->devnode = pcie40_devnode;
		return 0;
	}
	rc = PTR_ERR(pcie40_ecs_class);
	printk(P40_WARN "failed to register class, %d\n", P40_PARM, rc);
	return rc;
}
//+`pcie40_ecs_exit` Destroy ECS device class.
void pcie40_ecs_exit(void)//;?>
{
	/* Unregister the ECS device class created by pcie40_ecs_init(). */
	class_destroy(pcie40_ecs_class);
}
#endif//__PCIE40_DRIVER_ECS_H
//p40emu``+
#define P40_FMT "P40ECSemu:%s(): "
#define PCIE40_ECS_CLASS "lhcb_pcie40_ecs_emu"
#define PCIE40_EMU
#include "ecs.h"
#include "pcie40_ioctl.h"
//static void ecs_emu_vma_open(struct vm_area_struct *vma)
//{
// printk(P40_INFO "VIRT=%lx, PHYS=%lx SIZE=%lx\n", P40_PARM, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, vma->vm_end - vma->vm_start);
//}
//static void ecs_emu_vma_close(struct vm_area_struct *vma)
//{
// printk(P40_INFO "VIRT=%lx, PHYS=%lx SIZE=%lx\n", P40_PARM, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, vma->vm_end - vma->vm_start);
//}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
static int ecs_emu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static int ecs_emu_vma_fault(struct vm_fault *vmf)
#endif
{
struct pcie40_ecs_state *state = vma->vm_private_data;
unsigned long offset =
(unsigned long)(vmf->virtual_address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
void *ptr = NULL;
switch (iminor(vma->vm_file->f_path.dentry->d_inode)) {
case BAR0_CDEV_MINOR:
ptr = state->common->bar0_regs + offset;
break;
case BAR2_CDEV_MINOR:
ptr = state->common->bar2_regs + offset;
break;
default:
return -EINVAL;
}
vmf->page = vmalloc_to_page(ptr);
get_page(vmf->page);
//printk(P40_DIAG "fault @ 0x%08lX\n", P40_PARM, vmf->pgoff);
return 0;
}
/* VM operations for emulated BAR mappings: pages are provided on demand by
 * the fault handler instead of being remapped up front. */
static struct vm_operations_struct ecs_emu_vm_ops = {
	//.access = generic_access_phys,
	//.open = ecs_emu_vma_open,
	//.close = ecs_emu_vma_close,
	.fault = ecs_emu_vma_fault,
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
static int ecs_emu_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#else
static long ecs_emu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#endif //+`ecs_emu_ioctl`
{
	/* Emulated ioctl handler; mirrors ecs_ioctl(): the only supported
	 * command is P40_ECS_GET_BAR_SIZE, returning the size in bytes of the
	 * BAR selected by the device minor number. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	struct inode *inode = filp->f_inode;
#endif
	struct pcie40_ecs_state *state = filp->private_data;
	int bar;
	switch (iminor(inode)) {
	case BAR0_CDEV_MINOR:
		bar = 0;
		break;
	case BAR2_CDEV_MINOR:
		bar = 2;
		break;
	default:
		return -EINVAL;
	}
	//ioctl.pcie`P40_ECS_GET_BAR_SIZE`
	if (cmd != P40_ECS_GET_BAR_SIZE) {
		printk(P40_DIAG "invalid ioctl command\n", P40_PARM);
		return -EINVAL;
	}
	printk(P40_INFO "ECS BAR size is %lu\n", P40_PARM, state->common->bar_size[bar]);
	return state->common->bar_size[bar];
}
//+`ecs_emu_mmap`
static int ecs_emu_mmap(struct file* filp, struct vm_area_struct* vma)//;?>
{
	/* Emulated mmap: instead of remapping real BAR memory, install the
	 * fault-based vm_ops so pages of the vmalloc'd register emulation are
	 * supplied on demand. */
	struct pcie40_ecs_state *state = filp->private_data;
	int bar;
	switch (iminor(filp->f_path.dentry->d_inode)) {
	case BAR0_CDEV_MINOR:
		bar = 0;
		break;
	case BAR2_CDEV_MINOR:
		bar = 2;
		break;
	default:
		return -EINVAL;
	}
	vma->vm_flags |= VM_SHARED;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	printk(P40_INFO "VIRT=%lx PHYS=%lx SIZE=%lu\n", P40_PARM,
		vma->vm_start, state->common->bar_start[bar], vma->vm_end - vma->vm_start);
	vma->vm_ops = &ecs_emu_vm_ops;
	// Needed by the fault handler to find the backing store.
	vma->vm_private_data = state;
	return 0;
}
/* File operations for the emulated ECS devices; open/release are shared
 * with the real driver, ioctl/mmap use the emulated implementations. */
static struct file_operations ecs_file_ops = {
	.owner = THIS_MODULE,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
	.ioctl = ecs_emu_ioctl, /* legacy ioctl signature on pre-2.6.35 kernels */
#else
	.unlocked_ioctl = ecs_emu_ioctl,
#endif
	.mmap = ecs_emu_mmap,
	.open = ecs_open,
	.release = ecs_release,
};
int pcie40_ecs_emu_probe(struct pcie40_state *common)
{
int rc = 0;
struct pcie40_ecs_state *state = NULL;
state = kzalloc(sizeof(struct pcie40_ecs_state), GFP_KERNEL);
if (IS_ERR(state)) {
printk(P40_ERR "kzalloc()\n", P40_PARM);
rc = PTR_ERR(state);
goto err_kzalloc;
}
state->common = common;
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
rc = alloc_chrdev_region(&(state->dev_num), P40_ECS_CDEV_BASEMINOR, P40_ECS_CDEV_COUNT, P40_DRV_NAME);
if (rc < 0) {
printk(P40_ERR "alloc_chrdev_region()\n", P40_PARM);
goto err_alloc_chrdev_region;
}
// BAR0 endpoint
if (state->common->bar_size[0]) {
rc = pcie40_setup_cdev(pcie40_ecs_class, &(state->bar0_cdev), state->dev_num, BAR0_CDEV_MINOR, 0, BAR0_CDEV_NAME, state->common->dev_id, &ecs_file_ops);
if (rc < 0) {
goto err_bar0_dev;
}
}
// BAR2 endpoint
if (state->common->bar_size[2]) {
rc = pcie40_setup_cdev(pcie40_ecs_class, &(state->bar2_cdev), state->dev_num, BAR2_CDEV_MINOR, 2, BAR2_CDEV_NAME, state->common->dev_id, &ecs_file_ops);
if (rc < 0) {
goto err_bar2_dev;
}
}
common->ecs_state = state;
return rc;
err_bar2_dev:
if (state->common->bar_size[0]) {
printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR0_CDEV_NAME);
device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR0_CDEV_MINOR));
}
err_bar0_dev:
unregister_chrdev_region(state->dev_num, P40_ECS_CDEV_COUNT);
err_alloc_chrdev_region:
kfree(state);
common->ecs_state = NULL;
err_kzalloc:
return rc;
}
/* Teardown for an emulated ECS interface: destroy the bar2/bar0 device
 * files, release the minor range, free the state and clear the pointer so
 * a repeated remove is a no-op. */
void pcie40_ecs_emu_remove(struct pcie40_state *common)
{
	struct pcie40_ecs_state *state = common->ecs_state;
	printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
	if (!state) {
		printk(P40_ERR "no state\n", P40_PARM);
		return;
	}
	if (state->common->bar_size[2]) {
		printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR2_CDEV_NAME);
		device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR2_CDEV_MINOR));
	}
	if (state->common->bar_size[0]) {
		printk(P40_INFO "remove /dev/pcie40_%d_%s\n", P40_PARM, state->common->dev_id, BAR0_CDEV_NAME);
		device_destroy(pcie40_ecs_class, MKDEV(MAJOR(state->dev_num), MINOR(state->dev_num)+BAR0_CDEV_MINOR));
	}
	unregister_chrdev_region(state->dev_num, P40_ECS_CDEV_COUNT);
	kfree(state);
	common->ecs_state = NULL;
}
//p40fpga``+
//ug`pcie40_driver.description`
// ``lhcb_pcie40.ko`` is the driver for the PCIe40 data acquisition card.
//dg`wt.pcie40.fpga.init` When the kernel module is loaded, p40fpga`pcie40_init` is immediately called. Its dual at unload time is p40fpga`pcie40_exit` . The actual PCI device management happens in p40fpga`pcie40_probe` and p40fpga`pcie40_remove` .
#define P40_FMT "P40:%s(): "
#include "common.h"
#include <linux/init.h>
#include <linux/module.h>
//ug`pcie40_driver.synopsis`
// modprobe *lhcb_pcie40* [ mainmibs=_M_ ] [ metamibs=_M_ ]
static LIST_HEAD(pcie40_inst_list);
static DEFINE_SPINLOCK(pcie40_inst_list_lock);
static unsigned long pcie40_inst_list_lock_flags;
//+`pcie40_ids` <?
static const struct pci_device_id pcie40_ids[] = {
{ PCI_DEVICE(0x10DC, 0xCE40), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pcie40_ids);//?>
//ug`pcie40_driver.sysfs`link _ /sys/devices/.../pcie40_link::
// Link identifier for this interface within one PCIe40 board (0 for the primary and 1 for the secondary).
/* sysfs show handler for pcie40_link: the link id within the board, or -1
 * when no driver state is attached. */
static ssize_t attr_show_link(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci = container_of(dev, struct pci_dev, dev);
	struct pcie40_state *state = pci_get_drvdata(pci);
	int link_id = -1;
	if (state) {
		link_id = state->link_id;
	}
	return sprintf(buf, "%d", link_id);
}
static DEVICE_ATTR(pcie40_link, S_IRUGO, attr_show_link, NULL);
//ug`pcie40_driver.sysfs`interface _ /sys/devices/.../pcie40_interface::
// Interface identifier allocated by the driver, this value uniquely identifies a PCIe40 interface within the machine.
/* sysfs show handler for pcie40_interface: the driver-assigned interface
 * id, or -1 when no driver state is attached. */
static ssize_t attr_show_interface(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci = container_of(dev, struct pci_dev, dev);
	struct pcie40_state *state = pci_get_drvdata(pci);
	int if_id = -1;
	if (state) {
		if_id = state->dev_id;
	}
	return sprintf(buf, "%d", if_id);
}
static DEVICE_ATTR(pcie40_interface, S_IRUGO, attr_show_interface, NULL);
//ug`pcie40_driver.sysfs`loaded _ /sys/devices/.../pcie40_loaded::
// 1 if the FPGA BARs are readable, 0 if the FPGA has been reprogrammed and the driver must be reloaded.
/* sysfs show handler for pcie40_loaded: 1 while the FPGA BARs are readable,
 * 0 once the FPGA has been reprogrammed and the driver must be reloaded. */
static ssize_t attr_show_loaded(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci = container_of(dev, struct pci_dev, dev);
	struct pcie40_state *state = pci_get_drvdata(pci);
	int loaded = pcie40_device_accessible(state) ? 1 : 0;
	return sprintf(buf, "%d", loaded);
}
static DEVICE_ATTR(pcie40_loaded, S_IRUGO, attr_show_loaded, NULL);
int pcie40_ecs_init(void);
void pcie40_ecs_exit(void);
int pcie40_ecs_probe(struct pci_dev *dev, const struct pci_device_id *id);
void pcie40_ecs_remove(struct pci_dev *dev);
int pcie40_daq_init(void);
void pcie40_daq_exit(void);
int pcie40_daq_probe(struct pci_dev *dev, const struct pci_device_id *id);
void pcie40_daq_remove(struct pci_dev *dev);
//+`pcie40_probe` Scan PCI bus to detect PCIe40 board.
static int pcie40_probe(struct pci_dev *dev, const struct pci_device_id *id)//;?>
{
int rc = 0;
struct pcie40_state *state = NULL, *li, *lp = NULL;
int bar;
printk(P40_DIAG "found PCI device, vendor: %08X device: %08X\n", P40_PARM, id->vendor, id->device);
//? This function allocates a p40driver`pcie40_state` instance used to track the state of this device within the kernel.
state = kzalloc(sizeof(struct pcie40_state), GFP_KERNEL);
if (IS_ERR(state)) {
printk(P40_ERR "kzalloc()\n", P40_PARM);
rc = PTR_ERR(state);
goto err_kzalloc;
}
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
INIT_LIST_HEAD(&state->list);
rc = pci_enable_device(dev);
if (rc) {
printk(P40_DIAG "pci_enable_device() failed\n", P40_PARM);
goto err_pci_enable_device;
}
pci_set_master(dev); // Without this, no interrupts will be received!!!
//"The key difference that _exclusive makes it that userspace is explicitly not allowed to map the resource via /dev/mem or sysfs."
rc = pci_request_selected_regions_exclusive(dev, P40_COMMON_BARS_MASK, P40_DRV_NAME);
if (rc) {
printk(P40_WARN "unable to reserve DAQ regions\n", P40_PARM);
goto err_pci_request_regions_daq;
}
printk(P40_INFO "initializing BARs\n", P40_PARM);
//? The state is initialized with the position and size of all PCI BARs.
for (bar = 0; bar < P40_MAX_BAR; ++bar) {
state->bar_start[bar] = pci_resource_start(dev, bar);
state->bar_size[bar] = pci_resource_len(dev, bar);
//TODO: print BAR information
}
if (!state->bar_size[1]) {
printk(P40_ERR "no BAR1 detected!\n", P40_PARM);
rc = -1;
goto err_no_bar1;
}
//? BAR0, if present, is mapped inside the kernel to be accessible by the SCA interface (in addition, both BAR0 and BAR2 are accessible by userspace via memory mapped access).
if (state->bar_start[0] && state->bar_size[0]) {
printk(P40_INFO "pci_iomap() BAR0 (%lu bytes)\n", P40_PARM, state->bar_size[1]);
state->bar0_regs = pci_iomap(dev, 0, state->bar_size[0]);
if (state->bar0_regs == NULL) {
rc = -1;
goto err_bar0_iomap;
}
}
//? BAR1 is always mapped inside the kernel as it's used directly by DAQ interface.
printk(P40_INFO "pci_iomap() BAR1 (%lu bytes)\n", P40_PARM, state->bar_size[1]);
state->bar1_regs = pci_iomap(dev, 1, state->bar_size[1]);
if (state->bar1_regs == NULL) {
rc = -1;
goto err_bar1_iomap;
}
//? Using this mapping, the driver ensures that PCIe registers on the FPGA can be accessed.
if (!pcie40_device_accessible(state)) {
rc = -1;
printk(P40_ERR "Device detected but unreadable, please re-enumerate bus to continue\n", P40_PARM);
goto err_access;
}
spin_lock_irqsave(&pcie40_inst_list_lock, pcie40_inst_list_lock_flags);
//? Then it reads the regmap`pcie.dma_ctrl.link_id` register to identify which PCIe link from the FPGA is being probed.
state->link_id = pcie40_read32_ctrl(state, P40_DMA_CTRL_OFF_LINK_ID);
//? Using this information, a unique interface identifier is allocated to the PCIe link.
if (state->link_id == 0) {
state->dev_id = 0; //? Interfaces with PCIe link 0 get an even interface id.
} else {
state->dev_id = 1; //? Interfaces on PCIe link 1 get an odd interface id.
}
//? The driver always allocates the lowest available interface id.
list_for_each_entry(li, &pcie40_inst_list, list) {
if ((state->dev_id & 1) == (li->dev_id & 1)) {
if (state->dev_id == li->dev_id) {
state->dev_id += 2;
}
}
if (lp) {
if (state->dev_id < lp->dev_id) {
list_add_tail(&state->list, &lp->list);
break;
}
}
if (state->dev_id < li->dev_id) {
list_add_tail(&state->list, &li->list);
break;
}
lp = li;
}
if (list_empty(&state->list)) {
list_add_tail(&state->list, &pcie40_inst_list);
}
spin_unlock_irqrestore(&pcie40_inst_list_lock, pcie40_inst_list_lock_flags);
state->pci_dev = dev;
pci_set_drvdata(dev, state);
//? Finally it calls the probing logic of the subdrivers via p40fpga`pcie40_ecs_probe` and p40fpga`pcie40_daq_probe` .
pcie40_ecs_probe(dev, id);
pcie40_daq_probe(dev, id);
device_create_file(&dev->dev, &dev_attr_pcie40_link);
device_create_file(&dev->dev, &dev_attr_pcie40_interface);
device_create_file(&dev->dev, &dev_attr_pcie40_loaded);
//? After initializing the subdrivers, this function always returns success, this is to ensure that p40fpga`pcie40_remove` is always called also in case only some subdrivers are loaded.
return 0;
err_access:
if (state->bar_size[1]) {
iounmap(state->bar1_regs);
state->bar1_regs = NULL;
}
err_bar1_iomap:
if (state->bar_size[0]) {
iounmap(state->bar0_regs);
state->bar0_regs = NULL;
}
err_bar0_iomap:
err_no_bar1:
pci_release_selected_regions(dev, P40_COMMON_BARS_MASK);
err_pci_request_regions_daq:
pci_disable_device(dev);
err_pci_enable_device:
kfree(state);
err_kzalloc:
return rc;
}
//+`pcie40_remove` Remove PCIe40 board from kernel.
//
// Teardown mirror of pcie40_probe(): unlink the instance, drop the sysfs
// attributes, detach the subdrivers, unmap the BARs and release the PCI
// resources, in that order.
static void pcie40_remove(struct pci_dev *dev)//;?>
{
	struct pcie40_state *state = pci_get_drvdata(dev);
	/* Unlink from the global instance list first, under the same lock used
	 * by probe, so concurrent probes no longer see this interface. */
	spin_lock_irqsave(&pcie40_inst_list_lock, pcie40_inst_list_lock_flags);
	list_del(&state->list);
	spin_unlock_irqrestore(&pcie40_inst_list_lock, pcie40_inst_list_lock_flags);
	/* Sysfs attributes are removed in reverse order of creation. */
	device_remove_file(&dev->dev, &dev_attr_pcie40_loaded);
	device_remove_file(&dev->dev, &dev_attr_pcie40_interface);
	device_remove_file(&dev->dev, &dev_attr_pcie40_link);
	//? First the submodules are uninitialized using p40fpga`pcie40_daq_remove` and p40fpga`pcie40_ecs_remove` .
	pcie40_daq_remove(dev);
	pcie40_ecs_remove(dev);
	//? BAR0 and BAR1 are unmapped using ``iounmap``.
	// NOTE(review): probe maps BAR0 only when bar_start[0] AND bar_size[0]
	// are non-zero, but this guard tests only bar_size[0]; bar0_regs could
	// be NULL here — confirm iounmap(NULL) is acceptable on all targets.
	if (state->bar_size[0]) {
		iounmap(state->bar0_regs);
		state->bar0_regs = NULL;
	}
	if (state->bar_size[1]) {
		iounmap(state->bar1_regs);
		state->bar1_regs = NULL;
	}
	printk(P40_INFO "releasing PCI regions\n", P40_PARM);
	pci_release_selected_regions(dev, P40_COMMON_BARS_MASK);
	//? Finally the PCI device is disabled
	pci_disable_device(dev);
	//? and the p40driver`pcie40_state` memory is freed.
	kfree(state);
}
/* PCI driver registration table: binds pcie40_probe()/pcie40_remove() to
 * the device ids listed in pcie40_ids. */
static struct pci_driver pcie40_pci_driver = {
	.name = P40_DRV_NAME,
	.id_table = pcie40_ids,
	.probe = pcie40_probe,
	.remove = pcie40_remove,
};
//+`pcie40_init` Initialize subdrivers and register PCIe driver with kernel.
//
// Returns 0 on success or the first failing step's negative error code.
// On failure, every step already completed is rolled back (BUGFIX: the
// previous version leaked the ECS/DAQ initialization when a later step
// failed, leaving their state registered with no matching exit call).
static int __init pcie40_init(void)//;?>
{
	int rc;
	//? The first module to be initialized is the ECS, using p40driver`pcie40_ecs_init` .
	rc = pcie40_ecs_init();
	if (rc < 0)
		return rc;
	//? Followed by the DAQ, using p40driver`pcie40_daq_init` .
	rc = pcie40_daq_init();
	if (rc < 0)
		goto err_daq_init;
	//? The driver is registered with the kernel using ``pci_register_driver``, its argument also contains the PCI device ids that correspond to the PCIe40 firmware (see p40fpga`pcie40_ids` ).
	rc = pci_register_driver(&pcie40_pci_driver);
	if (rc < 0)
		goto err_register;
	return 0;

err_register:
	pcie40_daq_exit();
err_daq_init:
	pcie40_ecs_exit();
	return rc;
}
//+`pcie40_exit` Unregister PCIe driver and uninitialize subdrivers.
static void __exit pcie40_exit(void)//?>
{
	/* Reverse order of pcie40_init(): PCI driver first, then DAQ, then ECS. */
	pci_unregister_driver(&pcie40_pci_driver);
	pcie40_daq_exit();
	pcie40_ecs_exit();
}
/* Module entry/exit registration and metadata. */
//+`pcie40_init`
module_init(pcie40_init);
//+`pcie40_exit`
module_exit(pcie40_exit);
MODULE_VERSION(DAQ40_VER_REL);
MODULE_LICENSE("GPL");
//TODO: MODULE_AUTHOR
//TODO: MODULE_DESCRIPTION
//ug`pcie40_driver_emu.description`
// ``lhcb_pcie40_emu.ko`` is a driver emulating the PCIe40 data acquisition card.
#define P40_FMT "P40emu:%s(): "
#define PCIE40_EMU
#include "common.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/vmalloc.h>
//ug`pcie40_driver_emu.synopsis`
// modprobe *lhcb_pcie40_emu* [ mainmibs=_M_ ] [ metamibs=_M_ ] [ baseid=_I_ ] [ numboards=_N_ ] [ mpf=_M_ ]
static LIST_HEAD(pcie40_emu_inst_list);
//ug`pcie40_driver_emu.options`baseid *baseid* = _I_::
// First interface ID for emulated boards (0 by default, increase to avoid conflicts with real boards already detected in the machine).
int baseid = 0;
module_param(baseid, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(baseid, "First interface ID for emulated boards");
//ug`pcie40_driver_emu.options`numboards *numboards* = _N_::
// Number of PCIe40 boards to emulate (1 by default). Two DAQ interfaces will be instantiated for each emulated board.
int numboards = 1;
module_param(numboards, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(numboards, "Number of PCIe40 boards to emulate");
//ug`pcie40_driver_emu.options`mpf *mpf* = _M_::
// Packing factor for metadata blocks (10000 by default).
int mpf = 10000;
module_param(mpf, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mpf, "Packing factor for metadata blocks");
int pcie40_ecs_init(void);
void pcie40_ecs_exit(void);
int pcie40_ecs_emu_probe(struct pcie40_state *common);
void pcie40_ecs_emu_remove(struct pcie40_state *common);
int pcie40_daq_init(void);
void pcie40_daq_exit(void);
int pcie40_daq_emu_probe(struct pcie40_state *common);
void pcie40_daq_emu_remove(struct pcie40_state *common);
static int pcie40_emu_probe(int link_id)
{
int rc = 0;
struct pcie40_state *state = NULL, *li;
printk(P40_DIAG "creating emulated PCIe40 interface (link %d)", P40_PARM, link_id);
state = kzalloc(sizeof(struct pcie40_state), GFP_KERNEL);
if (IS_ERR(state)) {
printk(P40_ERR "kzalloc()\n", P40_PARM);
rc = PTR_ERR(state);
goto err_kzalloc;
}
printk(P40_DIAG "state = 0x%p\n", P40_PARM, state);
INIT_LIST_HEAD(&state->list);
printk(P40_INFO "initializing BARs\n", P40_PARM);
if (link_id == 0) {
state->bar_start[0] = 0;
state->bar_size[0] = 32 * 1024 * 1024;
}
state->bar_start[1] = 0;
state->bar_size[1] = 256 * 1024;
if (link_id == 0) {
state->bar_start[2] = 0;
state->bar_size[2] = 32 * 1024 * 1024;
}
//TODO: print BAR information
if (state->bar_size[0]) {
printk(P40_INFO "allocating fake BAR0 (%lu bytes)\n", P40_PARM, state->bar_size[0]);
state->bar0_regs = vzalloc(state->bar_size[0]);
if (state->bar0_regs == NULL) {
rc = -1;
goto err_bar0_alloc;
}
}
if (state->bar_size[1]) {
printk(P40_INFO "allocating fake BAR1 (%lu bytes)\n", P40_PARM, state->bar_size[1]);
state->bar1_regs = vzalloc(state->bar_size[1]);
if (state->bar1_regs == NULL) {
rc = -1;
goto err_bar1_alloc;
}
}
if (state->bar_size[2]) {
printk(P40_INFO "allocating fake BAR2 (%lu bytes)\n", P40_PARM, state->bar_size[2]);
state->bar2_regs = vzalloc(state->bar_size[2]);
if (state->bar2_regs == NULL) {
rc = -1;
goto err_bar2_alloc;
}
}
//Fill fake BAR1 with plausible values
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_RWTEST, 0xCE40FACC);
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_REGMAP, P40_DMA_REGMAP_VERSION);
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_VERSION, 0x0400FACC);
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_LINK_ID, link_id);
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_MAIN_GEN_FIXED, 0x6243484C);
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_META_PACKING, 13);
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_PCIE_GEN, 3);
state->link_id = link_id;
if (state->link_id == 0) {
state->dev_id = baseid; // This device will get an even id
} else {
state->dev_id = baseid + 1; // This device will get an odd id
}
list_for_each_entry(li, &pcie40_emu_inst_list, list) {
if ((state->link_id == 0 && li->dev_id % 2 == 0)
|| (state->link_id != 0 && li->dev_id % 2 != 0)) {
if (li->dev_id >= state->dev_id) {
state->dev_id = li->dev_id + 2;
}
}
}
list_add(&state->list, &pcie40_emu_inst_list);
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_CHIP_ID_HI, 0x00CE40);
pcie40_write32_ctrl(state, P40_DMA_CTRL_OFF_CHIP_ID_LO, state->dev_id / 2 + 1);
rc |= pcie40_ecs_emu_probe(state);
rc |= pcie40_daq_emu_probe(state);
return rc;
//if (state->bar_size[1]) {
// kfree(state->bar1_regs);
// state->bar1_regs = NULL;
//}
err_bar2_alloc:
if (state->bar_size[1]) {
vfree(state->bar1_regs);
state->bar1_regs = NULL;
}
err_bar1_alloc:
if (state->bar_size[0]) {
vfree(state->bar0_regs);
state->bar0_regs = NULL;
}
err_bar0_alloc:
kfree(state);
err_kzalloc:
return rc;
}
/* Tear down one emulated interface: unlink it from the emulated instance
 * list, detach the subdrivers, then release the fake BAR buffers and the
 * state itself.  Mirror of pcie40_emu_probe(). */
static void pcie40_emu_remove(struct pcie40_state *state)
{
	list_del(&state->list);
	pcie40_ecs_emu_remove(state);
	pcie40_daq_emu_remove(state);
	/* Fake BARs were vzalloc'd, so they are released with vfree. */
	if (state->bar_size[0]) {
		vfree(state->bar0_regs);
		state->bar0_regs = NULL;
	}
	if (state->bar_size[1]) {
		vfree(state->bar1_regs);
		state->bar1_regs = NULL;
	}
	if (state->bar_size[2]) {
		vfree(state->bar2_regs);
		state->bar2_regs = NULL;
	}
	kfree(state);
}
/* Module init for the emulator: bring up the ECS and DAQ subdrivers, then
 * create two emulated interfaces (one per PCIe link) per requested board.
 * Returns 0 on success or the failing subdriver's error code. */
static int __init pcie40_emu_init(void)
{
	int rc;
	int i;
	baseid &= ~1; // the base Device ID must be even
	rc = pcie40_ecs_init();
	if (rc < 0)
		return rc;
	rc = pcie40_daq_init();
	if (rc < 0) {
		/* BUGFIX: roll back the ECS initialization on failure instead
		 * of leaving it registered with no matching exit call. */
		pcie40_ecs_exit();
		return rc;
	}
	for (i = 0; i < numboards; ++i) {
		pcie40_emu_probe(0);
		pcie40_emu_probe(1);
	}
	return 0;
}
/* Module exit for the emulator: destroy every emulated interface still in
 * the instance list (safe iteration — pcie40_emu_remove() unlinks and
 * frees each entry), then shut down the subdrivers in reverse init order. */
static void __exit pcie40_emu_exit(void)
{
	struct pcie40_state *li, *ln;
	list_for_each_entry_safe(li, ln, &pcie40_emu_inst_list, list) {
		pcie40_emu_remove(li);
	}
	pcie40_daq_exit();
	pcie40_ecs_exit();
}
/* Module entry/exit registration and metadata for the emulator module. */
module_init(pcie40_emu_init);
module_exit(pcie40_emu_exit);
MODULE_VERSION(DAQ40_VER_REL);
MODULE_LICENSE("GPL");
//TODO: MODULE_AUTHOR
//TODO: MODULE_DESCRIPTION
#!/bin/bash
# pcie40_reload -- reload the PCIe40 driver after FPGA reprogramming.
CACHE_PATH=/tmp/pcie40_reload.cache
#ug`pcie40_reload.description`
# This command must be executed to reload the PCIe40 driver after an FPGA has been reprogrammed.
# _ The tool performs the following steps, in order:
# Command-line flags; set by the getopts loop below.
RELOAD_MODULE=0
RELOAD_ALL=0
USE_CACHE=0
FLUSH_CACHE=0
QUIET=0
#ug`pcie40_reload.synopsis`
# *pcie40_reload* [-m] [-a] [-c] [-f] [-q]
# Print the synopsis and option summary on stderr.
function usage {
echo "pcie40_reload [-m] [-a] [-c] [-f] [-q]" >&2
echo " -m reload kernel module" >&2
echo " -a reload all devices" >&2
echo " -c always read cache" >&2
echo " -f always overwrite cache" >&2
echo " -q suppress dmesg output" >&2
}
# Parse command-line flags; each case sets one of the flag variables above.
while getopts "macfqh" opt; do
case $opt in
#ug`pcie40_reload.options`mod
# *-m*::
# Remove and then re-insert module into kernel (this is required after a driver update).
m) RELOAD_MODULE=1
;;
#ug`pcie40_reload.options`all
# *-a*::
# Reload all interface (by default only the interfaces where the FPGA has been reprogrammed are reloaded).
a) RELOAD_ALL=1
;;
#ug`pcie40_reload.options`cache
# *-c*::
# Take the device list from the local cache, if it exists. By default the cache is used only if no PCIe40 interface appears under sysfs. With this option the cache contents are used instead. This can be useful in case a reload was issued too early (while one FPGA was still being reprogrammed) and the corresponding sysfs nodes do not exist anymore.
c) USE_CACHE=1
;;
#ug`pcie40_reload.options`flush
# *-f*::
# Forces a cache update. By default the cache is updated only if additional entries have been detected compared to the cache contents. With this option the cache will be overwritten regardless.
f) FLUSH_CACHE=1
;;
#ug`pcie40_reload.options`quiet
# *-q*::
# Suppress dmesg output. Without this option a partial dmesg log is printed for troubleshooting in case of errors.
q) QUIET=1
;;
h)
usage
exit 1
;;
\?)
echo "Invalid option: -${OPTARG}" >&2
exit 1
;;
:)
echo "Option -${OPTARG} requires an argument" >&2
exit 1
;;
esac
done
# When not reloading the module itself, sample the loaded srcversion (the
# comparison against the installed module is currently disabled).
if [ "${RELOAD_MODULE}" -eq 0 ]; then
#INSTALLED_SRCVERSION=`modinfo -F srcversion pcie40`
LOADED_SRCVERSION=$(</sys/module/pcie40/srcversion)
#if [ "${LOADED_SRCVERSION}" != "${INSTALLED_SRCVERSION}" ]; then
#echo "Warning: loaded module version does not match installed version, run \`pcie40_reload -m\` to use the installed module instead" >&2
#fi;
else
# A module reload implies reloading every interface.
RELOAD_ALL=1
fi;
# Collect the sysfs directories of all PCIe40 interfaces, sorted by
# interface id in descending order.
devs=($(find /sys/devices/pci0000\:* -name pcie40_interface -exec cat {} \; -exec echo -n ' ' \; -execdir pwd \; | sort -nr | cut -f 2 -d ' ' 2>/dev/null))
# BUGFIX: test the array length; [ -z "${devs}" ] only inspected the first
# element of the array.
if [ ${#devs[@]} -eq 0 ]; then
if [ -f "${CACHE_PATH}" ]; then
echo "No PCIe40 devices appear in sysfs, using cache"
devs=($(<${CACHE_PATH}))
else
echo "No devices detected! Assuming the FPGA was programmed correctly, you might have to reboot"
exit 1
fi
elif [ ${USE_CACHE} -eq 1 ]; then
if [ -f "${CACHE_PATH}" ]; then
echo "Using PCIe40 interface list from cache"
devs=($(<${CACHE_PATH}))
else
echo "No PCIe40 interface cache present, if not all PCIe40 boards are detected you might have to reboot"
exit 1
fi
fi
if [ -f "${CACHE_PATH}" ]; then
devs_cached=($(<${CACHE_PATH}))
if [ ${FLUSH_CACHE} -eq 1 ] || [ ${#devs_cached[@]} -lt ${#devs[@]} ]; then
echo "Updating cache"
# BUGFIX: write the whole array; "${devs}" expanded to only the first
# element, truncating the cache to a single device.
echo "${devs[@]}" > ${CACHE_PATH}
fi
else
echo "Creating cache"
# BUGFIX: same single-element truncation as above.
echo "${devs[@]}" > ${CACHE_PATH}
fi
# Overall success flag and accumulators for pids holding each device node.
ok=1
pids_ctrl=
pids_bar0=
pids_bar2=
# get_pids FILE VARNAME -- print the processes holding FILE open and append
# their pids to the variable named VARNAME.
get_pids() {
	# $1: file to check
	# $2: variable to append pids to
	# Use $(...) instead of backticks, quote expansions, and keep the
	# loop variables local so they do not leak into the global scope.
	local pids pid
	pids=$(lsof -t "$1")
	for pid in $pids; do
		ps -p "$pid" --no-headers -o " %U%p%c"
		eval "${2}+=\ ${pid}"
	done
}
# Walk the interface list (highest id first) and report which device nodes
# exist and which processes currently hold them open.
if_id=${#devs[@]}
for dev in ${devs[@]}; do
(( if_id -= 1 ))
# Skip interfaces that are still readable unless a full reload was requested.
if [ ${RELOAD_ALL} -eq 0 ] && [ -f "${dev}/pcie40_loaded" ] && [ $(< ${dev}/pcie40_loaded) -eq 1 ]; then
continue;
fi;
echo "PCIe40 interface ${if_id}:"
# Only even interface ids expose the ECS BAR0/BAR2 device nodes.
if [ $((if_id % 2)) -eq 0 ]; then
if [ -e /dev/pcie40_${if_id}_bar0 ]; then
echo " BAR0 used by:"
get_pids /dev/pcie40_${if_id}_bar0 pids_bar0
else
echo " BAR0 missing!"
fi
if [ -e /dev/pcie40_${if_id}_bar2 ]; then
echo " BAR2 used by:"
get_pids /dev/pcie40_${if_id}_bar2 pids_bar2
else
echo " BAR2 missing!"
fi
fi
if [ -e /dev/pcie40_${if_id}_ctrl ]; then
echo " CTRL used by:"
get_pids /dev/pcie40_${if_id}_ctrl pids_ctrl
else
echo " CTRL missing!"
fi
done
#ug`pcie40_reload.steps`1
# . Terminates any process currently using the PCIe40 boards to be reloaded
if [ -n "$pids_bar0" ] || [ -n "$pids_bar2" ] || [ -n "$pids_ctrl" ]; then
echo -n "Stopping processes... "
kill $pids_bar0 $pids_bar2 $pids_ctrl
echo OK
fi
#ug`pcie40_reload.steps`2
# . For every PCIe40 interface:
# .. Disconnects the interface via _sysfs_
# .. Requests a PCI rescan on the upstream device
# .. Checks if the driver detected successfully the interface
devs_removed=
if [ ${RELOAD_MODULE} -eq 1 ]; then
# Full module reload: all interfaces come back when the module is re-inserted.
sudo rmmod lhcb_pcie40
sudo modprobe lhcb_pcie40
else
for dev in ${devs[@]}; do
# Skip interfaces that are still readable unless -a was given.
if [ ${RELOAD_ALL} -eq 0 ] && [ -f "${dev}/pcie40_loaded" ] && [ $(< ${dev}/pcie40_loaded) -eq 1 ]; then
continue;
fi;
# Already gone from sysfs: just queue it for the rescan pass below.
if [ ! -d "${dev}" ]; then
devs_removed="${dev} ${devs_removed}"
continue;
fi;
echo "Removing device ${dev}"
# Only detach devices that really carry the PCIe40 device id (0xce40).
if [ -f "${dev}/device" ] && [ $(< ${dev}/device) = 0xce40 ]; then
if [ -e ${dev}/driver ]; then
echo " Driver: OK"
else
echo " Driver: NO"
fi;
echo 1 | sudo tee ${dev}/remove > /dev/null
devs_removed="${dev} ${devs_removed}"
fi;
done
for dev in ${devs_removed}; do
echo "Reloading device ${dev}"
# Rescan from the parent bridge so the removed device is re-enumerated.
rp=`dirname ${dev}`
echo 1 | sudo tee ${rp}/rescan > /dev/null
if [ -e ${dev}/driver ]; then
echo " Driver: OK"
else
echo " Driver: NO"
ok=0
fi;
done
fi;
# Final verification pass: confirm every expected device node re-appeared.
if_id=${#devs[@]}
for dev in ${devs[@]}; do
(( if_id -= 1 ))
echo "PCIe40 interface ${if_id}:"
#ug`pcie40_reload.steps`3
# . Checks the presence of the ECS registers
if [ $((if_id % 2)) -eq 0 ]; then
echo -n " BAR0: "
if [ -e /dev/pcie40_${if_id}_bar0 ]; then
echo "OK"
else
echo "NO"
ok=0
fi
echo -n " BAR2: "
if [ -e /dev/pcie40_${if_id}_bar2 ]; then
echo "OK"
else
echo "NO"
ok=0
fi
fi
#ug`pcie40_reload.steps`4
# . Checks the presence of the DMA controllers
echo -n " CTRL: "
if [ -e /dev/pcie40_${if_id}_ctrl ]; then
echo "OK"
else
echo "NO"
ok=0
fi
done
# On failure, dump recent driver diagnostics unless -q was given.
if [ "$ok" -eq 0 ] && [ ${QUIET} -eq 0 ]; then
echo "Errors found, debug output follows:"
dmesg | grep -E "(P40:)|(P40DAQ:)" | tail -n 100
exit 1
fi
#ug`pcie40_reload.exit`
# 0 on success, 1 if issues have been encountered. In the latter case the last diagnostic messages from the driver are also printed to standard output.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment