2 * VMEbus User access driver
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
8 * Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/refcount.h>
21 #include <linux/cdev.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/errno.h>
26 #include <linux/init.h>
27 #include <linux/ioctl.h>
28 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/pagemap.h>
32 #include <linux/pci.h>
33 #include <linux/mutex.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/syscalls.h>
37 #include <linux/types.h>
40 #include <linux/uaccess.h>
41 #include <linux/vme.h>
/* Name used when registering the chrdev region and the sysfs class */
45 static const char driver_name[] = "vme_user";
/* VME bus numbers to bind to; filled in from the "bus" module parameter */
47 static int bus[VME_USER_BUS_MAX];
/* Count of valid entries in bus[], set by module_param_array() */
48 static unsigned int bus_num;
50 /* Currently Documentation/admin-guide/devices.rst defines the
54 * 0 = /dev/bus/vme/m0 First master image
55 * 1 = /dev/bus/vme/m1 Second master image
56 * 2 = /dev/bus/vme/m2 Third master image
57 * 3 = /dev/bus/vme/m3 Fourth master image
58 * 4 = /dev/bus/vme/s0 First slave image
59 * 5 = /dev/bus/vme/s1 Second slave image
60 * 6 = /dev/bus/vme/s2 Third slave image
61 * 7 = /dev/bus/vme/s3 Fourth slave image
62 * 8 = /dev/bus/vme/ctl Control
64 * It is expected that all VME bus drivers will use the
65 * same interface. For interface documentation see
66 * http://www.vmelinux.org/.
68 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
69 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
70 * We'll run with this for now as far as possible, however it probably makes
71 * sense to get rid of the old mappings and just do everything dynamically.
73 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
74 * defined above and try to support at least some of the interface from
75 * http://www.vmelinux.org/. As an alternative, a driver providing a saner
76 * interface can be written later.
78 * The vmelinux.org driver never supported slave images, the devices reserved
79 * for slaves were repurposed to support all 8 master images on the UniverseII!
80 * We shall support 4 masters and 4 slaves with this driver.
/* Fixed major number historically assigned to VME user devices */
82 #define VME_MAJOR 221 /* VME Major Device Number */
83 #define VME_DEVS 9 /* Number of dev entries */
85 #define MASTER_MINOR 0
89 #define CONTROL_MINOR 8
/* NOTE(review): SLAVE_MINOR / MASTER_MAX / SLAVE_MAX are used below but their
 * definitions are not visible in this view — confirm they exist above.
 */
91 #define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */
/* Per-minor image bookkeeping (one entry per master/slave/control device) */
94 * Structure to handle image related parameters.
97 void *kern_buf; /* Buffer address in kernel space */
98 dma_addr_t pci_buf; /* Buffer address in PCI address space */
99 unsigned long long size_buf; /* Buffer size */
100 struct mutex mutex; /* Mutex for locking image */
101 struct device *device; /* Sysfs device */
102 struct vme_resource *resource; /* VME resource */
103 int mmap_count; /* Number of current mmap's */
/* One descriptor per minor; indexed directly by device minor number */
106 static struct image_desc image[VME_DEVS];
108 static struct cdev *vme_user_cdev; /* Character device */
109 static struct class *vme_user_sysfs_class; /* Sysfs class */
110 static struct vme_dev *vme_user_bridge; /* Pointer to user device */
/* Maps each minor to its role; indices 0-3 masters, 4-7 slaves, 8 control */
112 static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
113 MASTER_MINOR, MASTER_MINOR,
114 SLAVE_MINOR, SLAVE_MINOR,
115 SLAVE_MINOR, SLAVE_MINOR,
/* Per-VMA private data refcounted across vm_open/vm_close so mmap_count is
 * only decremented when the last mapping clone goes away.
 * (Fields — minor, refcnt — are elided in this view; inferred from usage.)
 */
119 struct vme_user_vma_priv {
/* Read from a master window into userspace: clamp the request to the bounce
 * buffer size, read from VME into kern_buf, then copy out to the user.
 */
124 static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
129 if (count > image[minor].size_buf)
130 count = image[minor].size_buf;
/* vme_master_read() returns the number of bytes actually transferred */
132 copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
/* NOTE(review): raw __copy_to_user skips access_ok(); the plain
 * copy_to_user() would be the safer choice here — confirm intent.
 */
137 if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
/* Write to a master window from userspace: clamp to the bounce buffer,
 * copy in from the user, then push the data out over VME.
 */
143 static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
144 size_t count, loff_t *ppos)
146 if (count > image[minor].size_buf)
147 count = image[minor].size_buf;
/* NOTE(review): __copy_from_user skips access_ok(); plain copy_from_user()
 * would be safer — confirm intent.
 */
149 if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
/* Return value of vme_master_write() is the byte count or a negative errno */
152 return vme_master_write(image[minor].resource, image[minor].kern_buf,
/* Read directly out of a slave image's DMA-coherent buffer at offset *ppos.
 * Caller (vme_user_read) has already bounds-checked count against the image.
 */
156 static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
157 size_t count, loff_t *ppos)
161 image_ptr = image[minor].kern_buf + *ppos;
162 if (__copy_to_user(buf, image_ptr, (unsigned long)count))
/* Write directly into a slave image's DMA-coherent buffer at offset *ppos.
 * Caller (vme_user_write) has already bounds-checked count against the image.
 */
168 static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
169 size_t count, loff_t *ppos)
173 image_ptr = image[minor].kern_buf + *ppos;
174 if (__copy_from_user(image_ptr, buf, (unsigned long)count))
/* read() entry point: validates *ppos against the image size under the
 * per-image mutex, clamps count, and dispatches to resource_to_user()
 * (master windows) or buffer_to_user() (slave buffers).
 */
180 static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
183 unsigned int minor = MINOR(file_inode(file)->i_rdev);
/* The control device has no backing image to read from */
187 if (minor == CONTROL_MINOR)
190 mutex_lock(&image[minor].mutex);
192 /* XXX Do we *really* want this helper - we can use vme_*_get ? */
193 image_size = vme_get_size(image[minor].resource);
195 /* Ensure we are starting at a valid location */
196 if ((*ppos < 0) || (*ppos > (image_size - 1))) {
197 mutex_unlock(&image[minor].mutex);
201 /* Ensure not reading past end of the image */
202 if (*ppos + count > image_size)
203 count = image_size - *ppos;
/* Dispatch by device role; master images use the VME bounce path */
205 switch (type[minor]) {
207 retval = resource_to_user(minor, buf, count, ppos);
210 retval = buffer_to_user(minor, buf, count, ppos);
216 mutex_unlock(&image[minor].mutex);
/* write() entry point: mirror of vme_user_read() — validates *ppos and
 * count under the per-image mutex, then dispatches to resource_from_user()
 * (master windows) or buffer_from_user() (slave buffers).
 */
223 static ssize_t vme_user_write(struct file *file, const char __user *buf,
224 size_t count, loff_t *ppos)
226 unsigned int minor = MINOR(file_inode(file)->i_rdev);
/* The control device has no backing image to write to */
230 if (minor == CONTROL_MINOR)
233 mutex_lock(&image[minor].mutex);
235 image_size = vme_get_size(image[minor].resource);
237 /* Ensure we are starting at a valid location */
238 if ((*ppos < 0) || (*ppos > (image_size - 1))) {
239 mutex_unlock(&image[minor].mutex);
243 /* Ensure not writing past end of the image */
244 if (*ppos + count > image_size)
245 count = image_size - *ppos;
247 switch (type[minor]) {
249 retval = resource_from_user(minor, buf, count, ppos);
252 retval = buffer_from_user(minor, buf, count, ppos);
258 mutex_unlock(&image[minor].mutex);
/* llseek() entry point: seeks are bounded by the current image size, which
 * is read under the image mutex since vme_*_set can change it.
 */
266 static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
268 unsigned int minor = MINOR(file_inode(file)->i_rdev);
/* Only master/slave minors are seekable (visible cases are elided here) */
272 switch (type[minor]) {
275 mutex_lock(&image[minor].mutex);
276 image_size = vme_get_size(image[minor].resource);
/* fixed_size_llseek() handles SEEK_SET/CUR/END against image_size */
277 res = fixed_size_llseek(file, off, whence, image_size);
278 mutex_unlock(&image[minor].mutex);
286 * The ioctls provided by the old VME access method (the one at vmelinux.org)
287 * are most certainly wrong as they effectively push the register layout
288 * through to user space. Given that the VME core can handle multiple bridges,
289 * with different register layouts this is most certainly not the way to go.
291 * We aren't using the structures defined in the Motorola driver either - these
292 * are also quite low level, however we should use the definitions that have
293 * already been defined.
/* ioctl worker, called with image[minor].mutex held by
 * vme_user_unlocked_ioctl(). Dispatches on the minor's role:
 * control minor handles IRQ generation; master minors get/set window
 * attributes via struct vme_master; slave minors via struct vme_slave.
 * (Case labels and several statements are elided in this view.)
 */
295 static int vme_user_ioctl(struct inode *inode, struct file *file,
296 unsigned int cmd, unsigned long arg)
298 struct vme_master master;
299 struct vme_slave slave;
300 struct vme_irq_id irq_req;
301 unsigned long copied;
302 unsigned int minor = MINOR(inode->i_rdev);
305 void __user *argp = (void __user *)arg;
307 switch (type[minor]) {
/* Control minor: copy in an IRQ request and fire it on the bridge */
311 copied = copy_from_user(&irq_req, argp,
314 pr_warn("Partial copy from userspace\n");
318 return vme_irq_generate(vme_user_bridge,
/* Master get: zero the struct so no kernel stack leaks to userspace */
326 memset(&master, 0, sizeof(master));
328 /* XXX We do not want to push aspace, cycle and width
329 * to userspace as they are
331 retval = vme_master_get(image[minor].resource,
334 &master.size, &master.aspace,
335 &master.cycle, &master.dwidth);
337 copied = copy_to_user(argp, &master,
340 pr_warn("Partial copy to userspace\n");
/* Master set: refuse to move a window that userspace has mmap()ed */
348 if (image[minor].mmap_count != 0) {
349 pr_warn("Can't adjust mapped window\n");
353 copied = copy_from_user(&master, argp, sizeof(master));
355 pr_warn("Partial copy from userspace\n");
359 /* XXX We do not want to push aspace, cycle and width
360 * to userspace as they are
362 return vme_master_set(image[minor].resource,
363 master.enable, master.vme_addr, master.size,
364 master.aspace, master.cycle, master.dwidth);
/* Slave get: as with master get, zero first to avoid stack leaks */
372 memset(&slave, 0, sizeof(slave));
374 /* XXX We do not want to push aspace, cycle and width
375 * to userspace as they are
377 retval = vme_slave_get(image[minor].resource,
378 &slave.enable, &slave.vme_addr,
379 &slave.size, &pci_addr,
380 &slave.aspace, &slave.cycle);
382 copied = copy_to_user(argp, &slave,
385 pr_warn("Partial copy to userspace\n");
/* Slave set: the PCI side is pinned to our preallocated DMA buffer, so
 * the user-supplied buf_vme_addr (if any) is deliberately ignored.
 */
393 copied = copy_from_user(&slave, argp, sizeof(slave));
395 pr_warn("Partial copy from userspace\n");
399 /* XXX We do not want to push aspace, cycle and width
400 * to userspace as they are
402 return vme_slave_set(image[minor].resource,
403 slave.enable, slave.vme_addr, slave.size,
404 image[minor].pci_buf, slave.aspace,
/* unlocked_ioctl entry point: serializes all ioctls on a minor by taking
 * the per-image mutex around the legacy inode-style worker.
 */
416 vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
419 struct inode *inode = file_inode(file);
420 unsigned int minor = MINOR(inode->i_rdev);
422 mutex_lock(&image[minor].mutex);
423 ret = vme_user_ioctl(inode, file, cmd, arg);
424 mutex_unlock(&image[minor].mutex);
/* VMA open: a mapping is being duplicated (fork/split); take another
 * reference so vm_close only tears down on the final unmap.
 */
429 static void vme_user_vm_open(struct vm_area_struct *vma)
431 struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
433 refcount_inc(&vma_priv->refcnt);
/* VMA close: drop one reference; on the last one, decrement the image's
 * mmap_count (under the mutex) so the window can be reconfigured again.
 * (The kfree of vma_priv is elided in this view — presumably follows.)
 */
436 static void vme_user_vm_close(struct vm_area_struct *vma)
438 struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
439 unsigned int minor = vma_priv->minor;
/* Not the last mapping clone yet — nothing more to do */
441 if (!refcount_dec_and_test(&vma_priv->refcnt))
444 mutex_lock(&image[minor].mutex);
445 image[minor].mmap_count--;
446 mutex_unlock(&image[minor].mutex);
/* VMA lifetime hooks used to refcount mappings of master windows */
451 static const struct vm_operations_struct vme_user_vm_ops = {
452 .open = vme_user_vm_open,
453 .close = vme_user_vm_close,
/* Map a master window into userspace. Holds the image mutex across the
 * whole operation so mmap_count and the window state stay consistent, and
 * installs refcounted private data so overlapping munmaps are tracked.
 */
456 static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
459 struct vme_user_vma_priv *vma_priv;
461 mutex_lock(&image[minor].mutex);
/* Let the bridge driver do the actual remap of the window */
463 err = vme_master_mmap(image[minor].resource, vma);
465 mutex_unlock(&image[minor].mutex);
469 vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
/* Allocation-failure path (elided return presumably -ENOMEM) */
471 mutex_unlock(&image[minor].mutex);
475 vma_priv->minor = minor;
476 refcount_set(&vma_priv->refcnt, 1);
477 vma->vm_ops = &vme_user_vm_ops;
478 vma->vm_private_data = vma_priv;
/* A live mapping blocks VME_SET_MASTER window reconfiguration */
480 image[minor].mmap_count++;
482 mutex_unlock(&image[minor].mutex);
/* mmap() entry point: only master windows are mappable; other minors
 * fall through to the (elided) error return.
 */
487 static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
489 unsigned int minor = MINOR(file_inode(file)->i_rdev);
491 if (type[minor] == MASTER_MINOR)
492 return vme_user_master_mmap(minor, vma);
/* File operations shared by all nine minors; per-minor behavior is decided
 * inside each handler via the type[] table.
 */
497 static const struct file_operations vme_user_fops = {
498 .read = vme_user_read,
499 .write = vme_user_write,
500 .llseek = vme_user_llseek,
501 .unlocked_ioctl = vme_user_unlocked_ioctl,
/* The ioctl ABI uses only fixed-size types, so compat can share the handler */
502 .compat_ioctl = vme_user_unlocked_ioctl,
503 .mmap = vme_user_mmap,
/* Match callback for the VME core: accept a device only when its bus is in
 * the user-supplied bus[] list and its slot equals the device's own number,
 * which limits the driver to a single probe per configured bus.
 */
506 static int vme_user_match(struct vme_dev *vdev)
510 int cur_bus = vme_bus_num(vdev);
511 int cur_slot = vme_slot_num(vdev);
513 for (i = 0; i < bus_num; i++)
514 if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
521 * In this simple access driver, the old behaviour is being preserved as much
522 * as practical. We will therefore reserve the buffers and request the images
523 * here so that we don't have to do it later.
/* Probe: runs once (guarded by vme_user_bridge), then sets up everything
 * the driver needs: chrdev region, cdev, 4 slave windows with coherent
 * buffers, 4 master windows with kmalloc bounce buffers, and sysfs/udev
 * device nodes. Error paths unwind in reverse order (labels elided here).
 */
525 static int vme_user_probe(struct vme_dev *vdev)
530 /* Save pointer to the bridge device */
531 if (vme_user_bridge) {
532 dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
536 vme_user_bridge = vdev;
538 /* Initialise descriptors */
539 for (i = 0; i < VME_DEVS; i++) {
540 image[i].kern_buf = NULL;
541 image[i].pci_buf = 0;
542 mutex_init(&image[i].mutex);
543 image[i].device = NULL;
544 image[i].resource = NULL;
547 /* Assign major and minor numbers for the driver */
548 err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
551 dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
556 /* Register the driver as a char device */
557 vme_user_cdev = cdev_alloc();
558 if (!vme_user_cdev) {
562 vme_user_cdev->ops = &vme_user_fops;
563 vme_user_cdev->owner = THIS_MODULE;
564 err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
568 /* Request slave resources and allocate buffers (128kB wide) */
569 for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
570 /* XXX Need to properly request attributes */
571 /* For ca91cx42 bridge there are only two slave windows
572 * supporting A16 addressing, so we request A24 supported
575 image[i].resource = vme_slave_request(vme_user_bridge,
577 if (!image[i].resource) {
579 "Unable to allocate slave resource\n");
583 image[i].size_buf = PCI_BUF_SIZE;
/* DMA-coherent buffer shared between the VME slave window and read/write */
584 image[i].kern_buf = vme_alloc_consistent(image[i].resource,
585 image[i].size_buf, &image[i].pci_buf);
586 if (!image[i].kern_buf) {
588 "Unable to allocate memory for buffer\n");
589 image[i].pci_buf = 0;
590 vme_slave_free(image[i].resource);
597 * Request master resources allocate page sized buffers for small
600 for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
601 /* XXX Need to properly request attributes */
602 image[i].resource = vme_master_request(vme_user_bridge,
603 VME_A32, VME_SCT, VME_D32);
604 if (!image[i].resource) {
606 "Unable to allocate master resource\n");
/* Master path only needs a plain kernel bounce buffer, not DMA memory */
610 image[i].size_buf = PCI_BUF_SIZE;
611 image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
612 if (!image[i].kern_buf) {
614 vme_master_free(image[i].resource);
619 /* Create sysfs entries - on udev systems this creates the dev files */
620 vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
621 if (IS_ERR(vme_user_sysfs_class)) {
622 dev_err(&vdev->dev, "Error creating vme_user class.\n");
623 err = PTR_ERR(vme_user_sysfs_class);
627 /* Add sysfs Entries */
628 for (i = 0; i < VME_DEVS; i++) {
/* Device-node name template chosen by the minor's role */
633 name = "bus/vme/m%d";
636 name = "bus/vme/ctl";
639 name = "bus/vme/s%d";
/* Slave minors are renumbered s0..s3 by offsetting past the masters */
646 num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
647 image[i].device = device_create(vme_user_sysfs_class, NULL,
648 MKDEV(VME_MAJOR, i), NULL, name, num);
649 if (IS_ERR(image[i].device)) {
650 dev_info(&vdev->dev, "Error creating sysfs device\n");
651 err = PTR_ERR(image[i].device);
/* ---- error unwind (labels elided): destroy what was created so far ---- */
661 device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
663 class_destroy(vme_user_sysfs_class);
665 /* Ensure counter set correctly to unalloc all master windows */
668 while (i > MASTER_MINOR) {
670 kfree(image[i].kern_buf);
671 vme_master_free(image[i].resource);
675 * Ensure counter set correctly to unalloc all slave windows and buffers
679 while (i > SLAVE_MINOR) {
681 vme_free_consistent(image[i].resource, image[i].size_buf,
682 image[i].kern_buf, image[i].pci_buf);
683 vme_slave_free(image[i].resource);
686 cdev_del(vme_user_cdev);
688 unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
/* Remove: exact reverse of the probe path — sysfs devices and class,
 * master buffers/windows, slave windows (disabled first) and their
 * coherent buffers, then the cdev and the chrdev region.
 */
694 static int vme_user_remove(struct vme_dev *dev)
698 /* Remove sysfs Entries */
699 for (i = 0; i < VME_DEVS; i++) {
700 mutex_destroy(&image[i].mutex);
701 device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
703 class_destroy(vme_user_sysfs_class);
705 for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
706 kfree(image[i].kern_buf);
707 vme_master_free(image[i].resource);
/* Disable each slave window before tearing down its DMA buffer */
710 for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
711 vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
712 vme_free_consistent(image[i].resource, image[i].size_buf,
713 image[i].kern_buf, image[i].pci_buf);
714 vme_slave_free(image[i].resource);
717 /* Unregister device driver */
718 cdev_del(vme_user_cdev);
720 /* Unregister the major and minor device numbers */
721 unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
/* Driver descriptor registered with the VME core in vme_user_init() */
726 static struct vme_driver vme_user_driver = {
728 .match = vme_user_match,
729 .probe = vme_user_probe,
730 .remove = vme_user_remove,
/* Module init: refuse to load with no configured buses, clamp the bus list
 * to what we support, and register with the VME core for every slot —
 * vme_user_match() then filters down to the devices we actually want.
 */
733 static int __init vme_user_init(void)
737 pr_info("VME User Space Access Driver\n");
/* bus_num == 0 means the user gave no "bus=" parameter (check is elided) */
740 pr_err("No cards, skipping registration\n");
745 /* Let's start by supporting one bus, we can support more than one
746 * in future revisions if that ever becomes necessary.
748 if (bus_num > VME_USER_BUS_MAX) {
749 pr_err("Driver only able to handle %d buses\n",
751 bus_num = VME_USER_BUS_MAX;
755 * Here we just register the maximum number of devices we can and
756 * leave vme_user_match() to allow only 1 to go through to probe().
757 * This way, if we later want to allow multiple user access devices,
758 * we just change the code in vme_user_match().
760 retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
/* Module exit: unregister from the VME core, which triggers
 * vme_user_remove() for the bound device.
 */
771 static void __exit vme_user_exit(void)
773 vme_unregister_driver(&vme_user_driver);
776 MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
777 module_param_array(bus, int, &bus_num, 0000);
779 MODULE_DESCRIPTION("VME User Space Access Driver");
780 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
781 MODULE_LICENSE("GPL");
783 module_init(vme_user_init);
784 module_exit(vme_user_exit);