GNU Linux-libre 4.14.290-gnu1
[releases.git] / drivers / staging / vme / devices / vme_user.c
1 /*
2  * VMEbus User access driver
3  *
4  * Author: Martyn Welch <martyn.welch@ge.com>
5  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by:
8  *   Tom Armistead and Ajit Prem
9  *     Copyright 2004 Motorola Inc.
10  *
11  *
12  * This program is free software; you can redistribute  it and/or modify it
13  * under  the terms of  the GNU General  Public License as published by the
14  * Free Software Foundation;  either version 2 of the  License, or (at your
15  * option) any later version.
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/refcount.h>
21 #include <linux/cdev.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/errno.h>
26 #include <linux/init.h>
27 #include <linux/ioctl.h>
28 #include <linux/kernel.h>
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/pagemap.h>
32 #include <linux/pci.h>
33 #include <linux/mutex.h>
34 #include <linux/slab.h>
35 #include <linux/spinlock.h>
36 #include <linux/syscalls.h>
37 #include <linux/types.h>
38
39 #include <linux/io.h>
40 #include <linux/uaccess.h>
41 #include <linux/vme.h>
42
43 #include "vme_user.h"
44
static const char driver_name[] = "vme_user";

/* VME bus numbers this driver should bind to, supplied via the "bus"
 * module parameter; bus_num is the number of entries userspace set.
 */
static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;
49
50 /* Currently Documentation/admin-guide/devices.rst defines the
51  * following for VME:
52  *
53  * 221 char     VME bus
54  *                0 = /dev/bus/vme/m0           First master image
55  *                1 = /dev/bus/vme/m1           Second master image
56  *                2 = /dev/bus/vme/m2           Third master image
57  *                3 = /dev/bus/vme/m3           Fourth master image
58  *                4 = /dev/bus/vme/s0           First slave image
59  *                5 = /dev/bus/vme/s1           Second slave image
60  *                6 = /dev/bus/vme/s2           Third slave image
61  *                7 = /dev/bus/vme/s3           Fourth slave image
62  *                8 = /dev/bus/vme/ctl          Control
63  *
64  *              It is expected that all VME bus drivers will use the
65  *              same interface.  For interface documentation see
66  *              http://www.vmelinux.org/.
67  *
68  * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
69  * even support the tsi148 chipset (which has 8 master and 8 slave windows).
70  * We'll run with this for now as far as possible, however it probably makes
71  * sense to get rid of the old mappings and just do everything dynamically.
72  *
73  * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
74  * defined above and try to support at least some of the interface from
75  * http://www.vmelinux.org/ as an alternative the driver can be written
76  * providing a saner interface later.
77  *
78  * The vmelinux.org driver never supported slave images, the devices reserved
79  * for slaves were repurposed to support all 8 master images on the UniverseII!
80  * We shall support 4 masters and 4 slaves with this driver.
81  */
82 #define VME_MAJOR       221     /* VME Major Device Number */
83 #define VME_DEVS        9       /* Number of dev entries */
84
85 #define MASTER_MINOR    0
86 #define MASTER_MAX      3
87 #define SLAVE_MINOR     4
88 #define SLAVE_MAX       7
89 #define CONTROL_MINOR   8
90
91 #define PCI_BUF_SIZE  0x20000   /* Size of one slave image buffer */
92
93 /*
94  * Structure to handle image related parameters.
95  */
/*
 * Structure to handle image related parameters.
 *
 * One instance exists per device minor (4 masters, 4 slaves, 1 control);
 * see the image[] array below.  kern_buf/pci_buf describe the bounce
 * buffer used for slave windows (DMA-coherent) and master windows
 * (plain kmalloc; pci_buf stays 0 for those).
 */
struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int mmap_count;		/* Number of current mmap's */
};
105
static struct image_desc image[VME_DEVS];

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */

/* Minor-number to window-type map: minors 0-3 are master windows,
 * minors 4-7 are slave windows, minor 8 is the control device.
 */
static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};

/* Per-vma private data, shared between clones of a mapping (fork,
 * mremap); refcounted so mmap_count only drops when the last clone
 * is torn down.
 */
struct vme_user_vma_priv {
	unsigned int minor;
	refcount_t refcnt;
};
123
124 static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
125                                 loff_t *ppos)
126 {
127         ssize_t copied = 0;
128
129         if (count > image[minor].size_buf)
130                 count = image[minor].size_buf;
131
132         copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
133                                  count, *ppos);
134         if (copied < 0)
135                 return (int)copied;
136
137         if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
138                 return -EFAULT;
139
140         return copied;
141 }
142
143 static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
144                                   size_t count, loff_t *ppos)
145 {
146         if (count > image[minor].size_buf)
147                 count = image[minor].size_buf;
148
149         if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
150                 return -EFAULT;
151
152         return vme_master_write(image[minor].resource, image[minor].kern_buf,
153                                 count, *ppos);
154 }
155
156 static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
157                               size_t count, loff_t *ppos)
158 {
159         void *image_ptr;
160
161         image_ptr = image[minor].kern_buf + *ppos;
162         if (__copy_to_user(buf, image_ptr, (unsigned long)count))
163                 return -EFAULT;
164
165         return count;
166 }
167
168 static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
169                                 size_t count, loff_t *ppos)
170 {
171         void *image_ptr;
172
173         image_ptr = image[minor].kern_buf + *ppos;
174         if (__copy_from_user(image_ptr, buf, (unsigned long)count))
175                 return -EFAULT;
176
177         return count;
178 }
179
180 static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
181                              loff_t *ppos)
182 {
183         unsigned int minor = MINOR(file_inode(file)->i_rdev);
184         ssize_t retval;
185         size_t image_size;
186
187         if (minor == CONTROL_MINOR)
188                 return 0;
189
190         mutex_lock(&image[minor].mutex);
191
192         /* XXX Do we *really* want this helper - we can use vme_*_get ? */
193         image_size = vme_get_size(image[minor].resource);
194
195         /* Ensure we are starting at a valid location */
196         if ((*ppos < 0) || (*ppos > (image_size - 1))) {
197                 mutex_unlock(&image[minor].mutex);
198                 return 0;
199         }
200
201         /* Ensure not reading past end of the image */
202         if (*ppos + count > image_size)
203                 count = image_size - *ppos;
204
205         switch (type[minor]) {
206         case MASTER_MINOR:
207                 retval = resource_to_user(minor, buf, count, ppos);
208                 break;
209         case SLAVE_MINOR:
210                 retval = buffer_to_user(minor, buf, count, ppos);
211                 break;
212         default:
213                 retval = -EINVAL;
214         }
215
216         mutex_unlock(&image[minor].mutex);
217         if (retval > 0)
218                 *ppos += retval;
219
220         return retval;
221 }
222
223 static ssize_t vme_user_write(struct file *file, const char __user *buf,
224                               size_t count, loff_t *ppos)
225 {
226         unsigned int minor = MINOR(file_inode(file)->i_rdev);
227         ssize_t retval;
228         size_t image_size;
229
230         if (minor == CONTROL_MINOR)
231                 return 0;
232
233         mutex_lock(&image[minor].mutex);
234
235         image_size = vme_get_size(image[minor].resource);
236
237         /* Ensure we are starting at a valid location */
238         if ((*ppos < 0) || (*ppos > (image_size - 1))) {
239                 mutex_unlock(&image[minor].mutex);
240                 return 0;
241         }
242
243         /* Ensure not reading past end of the image */
244         if (*ppos + count > image_size)
245                 count = image_size - *ppos;
246
247         switch (type[minor]) {
248         case MASTER_MINOR:
249                 retval = resource_from_user(minor, buf, count, ppos);
250                 break;
251         case SLAVE_MINOR:
252                 retval = buffer_from_user(minor, buf, count, ppos);
253                 break;
254         default:
255                 retval = -EINVAL;
256         }
257
258         mutex_unlock(&image[minor].mutex);
259
260         if (retval > 0)
261                 *ppos += retval;
262
263         return retval;
264 }
265
266 static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
267 {
268         unsigned int minor = MINOR(file_inode(file)->i_rdev);
269         size_t image_size;
270         loff_t res;
271
272         switch (type[minor]) {
273         case MASTER_MINOR:
274         case SLAVE_MINOR:
275                 mutex_lock(&image[minor].mutex);
276                 image_size = vme_get_size(image[minor].resource);
277                 res = fixed_size_llseek(file, off, whence, image_size);
278                 mutex_unlock(&image[minor].mutex);
279                 return res;
280         }
281
282         return -EINVAL;
283 }
284
285 /*
286  * The ioctls provided by the old VME access method (the one at vmelinux.org)
287  * are most certainly wrong as the effectively push the registers layout
288  * through to user space. Given that the VME core can handle multiple bridges,
289  * with different register layouts this is most certainly not the way to go.
290  *
291  * We aren't using the structures defined in the Motorola driver either - these
292  * are also quite low level, however we should use the definitions that have
293  * already been defined.
294  */
295 static int vme_user_ioctl(struct inode *inode, struct file *file,
296                           unsigned int cmd, unsigned long arg)
297 {
298         struct vme_master master;
299         struct vme_slave slave;
300         struct vme_irq_id irq_req;
301         unsigned long copied;
302         unsigned int minor = MINOR(inode->i_rdev);
303         int retval;
304         dma_addr_t pci_addr;
305         void __user *argp = (void __user *)arg;
306
307         switch (type[minor]) {
308         case CONTROL_MINOR:
309                 switch (cmd) {
310                 case VME_IRQ_GEN:
311                         copied = copy_from_user(&irq_req, argp,
312                                                 sizeof(irq_req));
313                         if (copied) {
314                                 pr_warn("Partial copy from userspace\n");
315                                 return -EFAULT;
316                         }
317
318                         return vme_irq_generate(vme_user_bridge,
319                                                   irq_req.level,
320                                                   irq_req.statid);
321                 }
322                 break;
323         case MASTER_MINOR:
324                 switch (cmd) {
325                 case VME_GET_MASTER:
326                         memset(&master, 0, sizeof(master));
327
328                         /* XXX  We do not want to push aspace, cycle and width
329                          *      to userspace as they are
330                          */
331                         retval = vme_master_get(image[minor].resource,
332                                                 &master.enable,
333                                                 &master.vme_addr,
334                                                 &master.size, &master.aspace,
335                                                 &master.cycle, &master.dwidth);
336
337                         copied = copy_to_user(argp, &master,
338                                               sizeof(master));
339                         if (copied) {
340                                 pr_warn("Partial copy to userspace\n");
341                                 return -EFAULT;
342                         }
343
344                         return retval;
345
346                 case VME_SET_MASTER:
347
348                         if (image[minor].mmap_count != 0) {
349                                 pr_warn("Can't adjust mapped window\n");
350                                 return -EPERM;
351                         }
352
353                         copied = copy_from_user(&master, argp, sizeof(master));
354                         if (copied) {
355                                 pr_warn("Partial copy from userspace\n");
356                                 return -EFAULT;
357                         }
358
359                         /* XXX  We do not want to push aspace, cycle and width
360                          *      to userspace as they are
361                          */
362                         return vme_master_set(image[minor].resource,
363                                 master.enable, master.vme_addr, master.size,
364                                 master.aspace, master.cycle, master.dwidth);
365
366                         break;
367                 }
368                 break;
369         case SLAVE_MINOR:
370                 switch (cmd) {
371                 case VME_GET_SLAVE:
372                         memset(&slave, 0, sizeof(slave));
373
374                         /* XXX  We do not want to push aspace, cycle and width
375                          *      to userspace as they are
376                          */
377                         retval = vme_slave_get(image[minor].resource,
378                                                &slave.enable, &slave.vme_addr,
379                                                &slave.size, &pci_addr,
380                                                &slave.aspace, &slave.cycle);
381
382                         copied = copy_to_user(argp, &slave,
383                                               sizeof(slave));
384                         if (copied) {
385                                 pr_warn("Partial copy to userspace\n");
386                                 return -EFAULT;
387                         }
388
389                         return retval;
390
391                 case VME_SET_SLAVE:
392
393                         copied = copy_from_user(&slave, argp, sizeof(slave));
394                         if (copied) {
395                                 pr_warn("Partial copy from userspace\n");
396                                 return -EFAULT;
397                         }
398
399                         /* XXX  We do not want to push aspace, cycle and width
400                          *      to userspace as they are
401                          */
402                         return vme_slave_set(image[minor].resource,
403                                 slave.enable, slave.vme_addr, slave.size,
404                                 image[minor].pci_buf, slave.aspace,
405                                 slave.cycle);
406
407                         break;
408                 }
409                 break;
410         }
411
412         return -EINVAL;
413 }
414
415 static long
416 vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
417 {
418         int ret;
419         struct inode *inode = file_inode(file);
420         unsigned int minor = MINOR(inode->i_rdev);
421
422         mutex_lock(&image[minor].mutex);
423         ret = vme_user_ioctl(inode, file, cmd, arg);
424         mutex_unlock(&image[minor].mutex);
425
426         return ret;
427 }
428
429 static void vme_user_vm_open(struct vm_area_struct *vma)
430 {
431         struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
432
433         refcount_inc(&vma_priv->refcnt);
434 }
435
436 static void vme_user_vm_close(struct vm_area_struct *vma)
437 {
438         struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
439         unsigned int minor = vma_priv->minor;
440
441         if (!refcount_dec_and_test(&vma_priv->refcnt))
442                 return;
443
444         mutex_lock(&image[minor].mutex);
445         image[minor].mmap_count--;
446         mutex_unlock(&image[minor].mutex);
447
448         kfree(vma_priv);
449 }
450
/* vma callbacks used to refcount mappings of master windows */
static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};
455
456 static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
457 {
458         int err;
459         struct vme_user_vma_priv *vma_priv;
460
461         mutex_lock(&image[minor].mutex);
462
463         err = vme_master_mmap(image[minor].resource, vma);
464         if (err) {
465                 mutex_unlock(&image[minor].mutex);
466                 return err;
467         }
468
469         vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
470         if (!vma_priv) {
471                 mutex_unlock(&image[minor].mutex);
472                 return -ENOMEM;
473         }
474
475         vma_priv->minor = minor;
476         refcount_set(&vma_priv->refcnt, 1);
477         vma->vm_ops = &vme_user_vm_ops;
478         vma->vm_private_data = vma_priv;
479
480         image[minor].mmap_count++;
481
482         mutex_unlock(&image[minor].mutex);
483
484         return 0;
485 }
486
487 static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
488 {
489         unsigned int minor = MINOR(file_inode(file)->i_rdev);
490
491         if (type[minor] == MASTER_MINOR)
492                 return vme_user_master_mmap(minor, vma);
493
494         return -ENODEV;
495 }
496
/* File operations shared by all nine minors; the handlers themselves
 * branch on the minor's window type.  The ioctl structures contain no
 * pointers, so the same handler serves compat_ioctl.
 */
static const struct file_operations vme_user_fops = {
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
	.mmap = vme_user_mmap,
};
505
506 static int vme_user_match(struct vme_dev *vdev)
507 {
508         int i;
509
510         int cur_bus = vme_bus_num(vdev);
511         int cur_slot = vme_slot_num(vdev);
512
513         for (i = 0; i < bus_num; i++)
514                 if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
515                         return 1;
516
517         return 0;
518 }
519
520 /*
521  * In this simple access driver, the old behaviour is being preserved as much
522  * as practical. We will therefore reserve the buffers and request the images
523  * here so that we don't have to do it later.
524  */
/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
/*
 * Probe: claims the char device region, allocates the four slave windows
 * (with DMA-coherent bounce buffers), the four master windows (with
 * kmalloc'ed bounce buffers) and creates the sysfs/udev device nodes.
 * On any failure, unwinds everything acquired so far via the chained
 * goto labels at the bottom.  Only a single device is supported.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
	}

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
				     driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err)
		goto err_char;

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &image[i].pci_buf);
		if (!image[i].kern_buf) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			/* Free this window here; err_slave only unwinds
			 * windows below index i.
			 */
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources allocate page sized buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (!image[i].kern_buf) {
			err = -ENOMEM;
			/* As above: free this window before unwinding */
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		/* Slave minors start at 4 but /dev names restart at s0 */
		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
					MKDEV(VME_MAJOR, i), NULL, name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

	/* Unwind in reverse acquisition order; each label falls through
	 * into the next so a later failure releases everything earlier.
	 */
err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}
693
/*
 * Remove: tears down everything vme_user_probe() set up, in reverse —
 * sysfs devices and class, master buffers/windows, slave windows
 * (disabled first so the hardware stops DMAing into the buffer being
 * freed), then the cdev and the char device region.
 */
static int vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* Disable the window before releasing its DMA buffer */
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}
725
/* Driver registration descriptor handed to the VME core */
static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};
732
733 static int __init vme_user_init(void)
734 {
735         int retval = 0;
736
737         pr_info("VME User Space Access Driver\n");
738
739         if (bus_num == 0) {
740                 pr_err("No cards, skipping registration\n");
741                 retval = -ENODEV;
742                 goto err_nocard;
743         }
744
745         /* Let's start by supporting one bus, we can support more than one
746          * in future revisions if that ever becomes necessary.
747          */
748         if (bus_num > VME_USER_BUS_MAX) {
749                 pr_err("Driver only able to handle %d buses\n",
750                        VME_USER_BUS_MAX);
751                 bus_num = VME_USER_BUS_MAX;
752         }
753
754         /*
755          * Here we just register the maximum number of devices we can and
756          * leave vme_user_match() to allow only 1 to go through to probe().
757          * This way, if we later want to allow multiple user access devices,
758          * we just change the code in vme_user_match().
759          */
760         retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
761         if (retval)
762                 goto err_reg;
763
764         return retval;
765
766 err_reg:
767 err_nocard:
768         return retval;
769 }
770
/* Module exit: unregister from the VME core, which invokes
 * vme_user_remove() for the bound device.
 */
static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}
775
776 MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
777 module_param_array(bus, int, &bus_num, 0000);
778
779 MODULE_DESCRIPTION("VME User Space Access Driver");
780 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
781 MODULE_LICENSE("GPL");
782
783 module_init(vme_user_init);
784 module_exit(vme_user_exit);