GNU Linux-libre 4.9.337-gnu1
[releases.git] / drivers / media / pci / netup_unidvb / netup_unidvb_core.c
1 /*
2  * netup_unidvb_core.c
3  *
4  * Main module for NetUP Universal Dual DVB-CI
5  *
6  * Copyright (C) 2014 NetUP Inc.
7  * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
8  * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  */
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/kmod.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/list.h>
30 #include <media/videobuf2-v4l2.h>
31 #include <media/videobuf2-vmalloc.h>
32
33 #include "netup_unidvb.h"
34 #include "cxd2841er.h"
35 #include "horus3a.h"
36 #include "ascot2e.h"
37 #include "helene.h"
38 #include "lnbh25.h"
39
40 static int spi_enable;
41 module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
42
43 MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
44 MODULE_AUTHOR("info@netup.ru");
45 MODULE_VERSION(NETUP_UNIDVB_VERSION);
46 MODULE_LICENSE("GPL");
47
48 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
49
50 /* Avalon-MM PCI-E registers */
51 #define AVL_PCIE_IENR           0x50
52 #define AVL_PCIE_ISR            0x40
53 #define AVL_IRQ_ENABLE          0x80
54 #define AVL_IRQ_ASSERTED        0x80
55 /* GPIO registers */
56 #define GPIO_REG_IO             0x4880
57 #define GPIO_REG_IO_TOGGLE      0x4882
58 #define GPIO_REG_IO_SET         0x4884
59 #define GPIO_REG_IO_CLEAR       0x4886
60 /* GPIO bits */
61 #define GPIO_FEA_RESET          (1 << 0)
62 #define GPIO_FEB_RESET          (1 << 1)
63 #define GPIO_RFA_CTL            (1 << 2)
64 #define GPIO_RFB_CTL            (1 << 3)
65 #define GPIO_FEA_TU_RESET       (1 << 4)
66 #define GPIO_FEB_TU_RESET       (1 << 5)
67 /* DMA base address */
68 #define NETUP_DMA0_ADDR         0x4900
69 #define NETUP_DMA1_ADDR         0x4940
70 /* 8 DMA blocks * 128 packets * 188 bytes*/
71 #define NETUP_DMA_BLOCKS_COUNT  8
72 #define NETUP_DMA_PACKETS_COUNT 128
73 /* DMA status bits */
74 #define BIT_DMA_RUN             1
75 #define BIT_DMA_ERROR           2
76 #define BIT_DMA_IRQ             0x200
77
/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set:       Control register, write to set control bits
 * @ctrlstat_clear:     Control register, write to clear control bits
 * @start_addr_lo:      DMA ring buffer start address, lower part
 * @start_addr_hi:      DMA ring buffer start address, higher part
 * @size:               DMA ring buffer size register
 *                      Bits [0-7]:     DMA packet size, 188 bytes
 *                      Bits [16-23]:   packets count in block, 128 packets
 *                      Bits [24-31]:   blocks count, 8 blocks
 *                      NOTE(review): netup_unidvb_dma_init() programs the
 *                      packet count at bits [8-15], not [16-23] as stated
 *                      here - confirm against hardware documentation.
 * @timeout:            DMA timeout in units of 8ns
 *                      For example, value of 375000000 equals to 3 sec
 * @curr_addr_lo:       Current ring buffer head address, lower part
 * @curr_addr_hi:       Current ring buffer head address, higher part
 * @stat_pkt_received:  Statistic register, not tested
 * @stat_pkt_accepted:  Statistic register, not tested
 * @stat_pkt_overruns:  Statistic register, not tested
 * @stat_pkt_underruns: Statistic register, not tested
 * @stat_fifo_overruns: Statistic register, not tested
 */
struct netup_dma_regs {
	__le32	ctrlstat_set;
	__le32	ctrlstat_clear;
	__le32	start_addr_lo;
	__le32	start_addr_hi;
	__le32	size;
	__le32	timeout;
	__le32	curr_addr_lo;
	__le32	curr_addr_hi;
	__le32	stat_pkt_received;
	__le32	stat_pkt_accepted;
	__le32	stat_pkt_overruns;
	__le32	stat_pkt_underruns;
	__le32	stat_fifo_overruns;
} __packed __aligned(1);
113
/*
 * struct netup_unidvb_buffer - per-buffer driver state
 * @vb:		embedded videobuf2 buffer
 * @list:	entry in struct netup_dma free_buffers list
 * @size:	bytes already copied into this buffer from the DMA ring
 */
struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head	list;
	u32			size;
};
119
120 static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
121 static void netup_unidvb_queue_cleanup(struct netup_dma *dma);
122
/* Sony CXD2841ER demodulator; .xtal is overwritten per hardware
 * revision in netup_unidvb_dvb_init() */
static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8,
	.xtal = SONY_XTAL_24000
};

/* Sony HORUS3A DVB-S/S2 tuner (attached on hardware rev. 1.3) */
static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

/* Sony ASCOT2E terrestrial tuner (attached on hardware rev. 1.3) */
static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

/* Sony HELENE tuner (attached on newer hardware revisions) */
static struct helene_config helene_conf = {
	.i2c_address = 0xc0,
	.xtal = SONY_HELENE_XTAL_24000,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

/* LNBH25 LNB supply / SEC controller */
static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};
149
150 static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
151 {
152         u8 reg, mask;
153         struct netup_dma *dma = priv;
154         struct netup_unidvb_dev *ndev;
155
156         if (!priv)
157                 return -EINVAL;
158         ndev = dma->ndev;
159         dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
160                 __func__, dma->num, is_dvb_tc);
161         reg = readb(ndev->bmmio0 + GPIO_REG_IO);
162         mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
163
164         /* inverted tuner control in hw rev. 1.4 */
165         if (ndev->rev == NETUP_HW_REV_1_4)
166                 is_dvb_tc = !is_dvb_tc;
167
168         if (!is_dvb_tc)
169                 reg |= mask;
170         else
171                 reg &= ~mask;
172         writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
173         return 0;
174 }
175
/*
 * netup_unidvb_dev_enable - bring the board into an operational state
 * @ndev: device context
 *
 * Enables PCI-E interrupt delivery, holds the frontends and tuners in
 * reset briefly, then releases all reset lines and sets both RF
 * control bits.
 */
static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI-E interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* unreset frontends bits[0:1] */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	/* keep chips in reset for a while before releasing the lines */
	msleep(100);
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));

}
196
197 static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
198 {
199         u32 irq_mask = (dma->num == 0 ?
200                 NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);
201
202         dev_dbg(&dma->ndev->pci_dev->dev,
203                 "%s(): DMA%d enable %d\n", __func__, dma->num, enable);
204         if (enable) {
205                 writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
206                 writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
207         } else {
208                 writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
209                 writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
210         }
211 }
212
/*
 * netup_dma_interrupt - per-channel DMA interrupt handler
 * @dma: channel that raised the interrupt
 *
 * Reads the current hardware write position in the ring buffer,
 * computes how many new bytes arrived since the previous interrupt and
 * schedules the worker to copy them out.  Runs under dma->lock with
 * interrupts disabled.
 */
static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	/* OR back the high address bits masked off in netup_unidvb_dma_init() */
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check */
	if (addr_curr < dma->addr_phys ||
			addr_curr > dma->addr_phys +  dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
	/* bytes written since last interrupt, accounting for ring wrap */
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		/* worker has not consumed the previous chunk yet */
		printk_ratelimited("%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		/* (re)start accounting from the last known position */
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}
254
/*
 * netup_unidvb_isr - top-level PCI interrupt handler
 * @irq:	interrupt number (unused)
 * @dev_id:	struct pci_dev of the interrupting card
 *
 * Temporarily masks PCI-E interrupt delivery, reads the internal
 * interrupt status register and dispatches to the SPI, I2C, DMA or CI
 * sub-handler.  With old firmware (ndev->old_fw) only the SPI source
 * is serviced; everything else is reported as unknown.
 */
static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_SPI)
			iret = netup_spi_interrupt(ndev->spi);
		else if (!ndev->old_fw) {
			if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
				iret = netup_i2c_interrupt(&ndev->i2c[0]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
				iret = netup_i2c_interrupt(&ndev->i2c[1]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
				iret = netup_dma_interrupt(&ndev->dma[0]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
				iret = netup_dma_interrupt(&ndev->dma[1]);
			} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
				iret = netup_ci_interrupt(ndev);
			} else {
				/* no recognized bit set: fall through to log */
				goto err;
			}
		} else {
err:
			/* unexpected source; iret stays IRQ_NONE */
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}
296
297 static int netup_unidvb_queue_setup(struct vb2_queue *vq,
298                                     unsigned int *nbuffers,
299                                     unsigned int *nplanes,
300                                     unsigned int sizes[],
301                                     struct device *alloc_devs[])
302 {
303         struct netup_dma *dma = vb2_get_drv_priv(vq);
304
305         dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
306
307         *nplanes = 1;
308         if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
309                 *nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
310         sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
311         dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
312                 __func__, *nbuffers, sizes[0]);
313         return 0;
314 }
315
316 static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
317 {
318         struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
319         struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
320         struct netup_unidvb_buffer *buf = container_of(vbuf,
321                                 struct netup_unidvb_buffer, vb);
322
323         dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
324         buf->size = 0;
325         return 0;
326 }
327
328 static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
329 {
330         unsigned long flags;
331         struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
332         struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
333         struct netup_unidvb_buffer *buf = container_of(vbuf,
334                                 struct netup_unidvb_buffer, vb);
335
336         dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
337         spin_lock_irqsave(&dma->lock, flags);
338         list_add_tail(&buf->list, &dma->free_buffers);
339         spin_unlock_irqrestore(&dma->lock, flags);
340         mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
341 }
342
343 static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
344 {
345         struct netup_dma *dma = vb2_get_drv_priv(q);
346
347         dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
348         netup_unidvb_dma_enable(dma, 1);
349         return 0;
350 }
351
352 static void netup_unidvb_stop_streaming(struct vb2_queue *q)
353 {
354         struct netup_dma *dma = vb2_get_drv_priv(q);
355
356         dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
357         netup_unidvb_dma_enable(dma, 0);
358         netup_unidvb_queue_cleanup(dma);
359 }
360
/* videobuf2 queue operations shared by both DVB adapters */
static const struct vb2_ops dvb_qops = {
	.queue_setup		= netup_unidvb_queue_setup,
	.buf_prepare		= netup_unidvb_buf_prepare,
	.buf_queue		= netup_unidvb_buf_queue,
	.start_streaming	= netup_unidvb_start_streaming,
	.stop_streaming		= netup_unidvb_stop_streaming,
};
368
369 static int netup_unidvb_queue_init(struct netup_dma *dma,
370                                    struct vb2_queue *vb_queue)
371 {
372         int res;
373
374         /* Init videobuf2 queue structure */
375         vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
376         vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
377         vb_queue->drv_priv = dma;
378         vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
379         vb_queue->ops = &dvb_qops;
380         vb_queue->mem_ops = &vb2_vmalloc_memops;
381         vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
382         res = vb2_queue_init(vb_queue);
383         if (res != 0) {
384                 dev_err(&dma->ndev->pci_dev->dev,
385                         "%s(): vb2_queue_init failed (%d)\n", __func__, res);
386         }
387         return res;
388 }
389
390 static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
391                                  int num)
392 {
393         int fe_count = 2;
394         int i = 0;
395         struct vb2_dvb_frontend *fes[2];
396         u8 fe_name[32];
397
398         if (ndev->rev == NETUP_HW_REV_1_3)
399                 demod_config.xtal = SONY_XTAL_20500;
400         else
401                 demod_config.xtal = SONY_XTAL_24000;
402
403         if (num < 0 || num > 1) {
404                 dev_dbg(&ndev->pci_dev->dev,
405                         "%s(): unable to init DVB bus %d\n", __func__, num);
406                 return -ENODEV;
407         }
408         mutex_init(&ndev->frontends[num].lock);
409         INIT_LIST_HEAD(&ndev->frontends[num].felist);
410
411         for (i = 0; i < fe_count; i++) {
412                 if (vb2_dvb_alloc_frontend(&ndev->frontends[num], i+1)
413                                 == NULL) {
414                         dev_err(&ndev->pci_dev->dev,
415                                         "%s(): unable to allocate vb2_dvb_frontend\n",
416                                         __func__);
417                         return -ENOMEM;
418                 }
419         }
420
421         for (i = 0; i < fe_count; i++) {
422                 fes[i] = vb2_dvb_get_frontend(&ndev->frontends[num], i+1);
423                 if (fes[i] == NULL) {
424                         dev_err(&ndev->pci_dev->dev,
425                                 "%s(): frontends has not been allocated\n",
426                                 __func__);
427                         return -EINVAL;
428                 }
429         }
430
431         for (i = 0; i < fe_count; i++) {
432                 netup_unidvb_queue_init(&ndev->dma[num], &fes[i]->dvb.dvbq);
433                 snprintf(fe_name, sizeof(fe_name), "netup_fe%d", i);
434                 fes[i]->dvb.name = fe_name;
435         }
436
437         fes[0]->dvb.frontend = dvb_attach(cxd2841er_attach_s,
438                 &demod_config, &ndev->i2c[num].adap);
439         if (fes[0]->dvb.frontend == NULL) {
440                 dev_dbg(&ndev->pci_dev->dev,
441                         "%s(): unable to attach DVB-S/S2 frontend\n",
442                         __func__);
443                 goto frontend_detach;
444         }
445
446         if (ndev->rev == NETUP_HW_REV_1_3) {
447                 horus3a_conf.set_tuner_priv = &ndev->dma[num];
448                 if (!dvb_attach(horus3a_attach, fes[0]->dvb.frontend,
449                                         &horus3a_conf, &ndev->i2c[num].adap)) {
450                         dev_dbg(&ndev->pci_dev->dev,
451                                         "%s(): unable to attach HORUS3A DVB-S/S2 tuner frontend\n",
452                                         __func__);
453                         goto frontend_detach;
454                 }
455         } else {
456                 helene_conf.set_tuner_priv = &ndev->dma[num];
457                 if (!dvb_attach(helene_attach_s, fes[0]->dvb.frontend,
458                                         &helene_conf, &ndev->i2c[num].adap)) {
459                         dev_err(&ndev->pci_dev->dev,
460                                         "%s(): unable to attach HELENE DVB-S/S2 tuner frontend\n",
461                                         __func__);
462                         goto frontend_detach;
463                 }
464         }
465
466         if (!dvb_attach(lnbh25_attach, fes[0]->dvb.frontend,
467                         &lnbh25_conf, &ndev->i2c[num].adap)) {
468                 dev_dbg(&ndev->pci_dev->dev,
469                         "%s(): unable to attach SEC frontend\n", __func__);
470                 goto frontend_detach;
471         }
472
473         /* DVB-T/T2 frontend */
474         fes[1]->dvb.frontend = dvb_attach(cxd2841er_attach_t_c,
475                 &demod_config, &ndev->i2c[num].adap);
476         if (fes[1]->dvb.frontend == NULL) {
477                 dev_dbg(&ndev->pci_dev->dev,
478                         "%s(): unable to attach Ter frontend\n", __func__);
479                 goto frontend_detach;
480         }
481         fes[1]->dvb.frontend->id = 1;
482         if (ndev->rev == NETUP_HW_REV_1_3) {
483                 ascot2e_conf.set_tuner_priv = &ndev->dma[num];
484                 if (!dvb_attach(ascot2e_attach, fes[1]->dvb.frontend,
485                                         &ascot2e_conf, &ndev->i2c[num].adap)) {
486                         dev_dbg(&ndev->pci_dev->dev,
487                                         "%s(): unable to attach Ter tuner frontend\n",
488                                         __func__);
489                         goto frontend_detach;
490                 }
491         } else {
492                 helene_conf.set_tuner_priv = &ndev->dma[num];
493                 if (!dvb_attach(helene_attach, fes[1]->dvb.frontend,
494                                         &helene_conf, &ndev->i2c[num].adap)) {
495                         dev_err(&ndev->pci_dev->dev,
496                                         "%s(): unable to attach HELENE Ter tuner frontend\n",
497                                         __func__);
498                         goto frontend_detach;
499                 }
500         }
501
502         if (vb2_dvb_register_bus(&ndev->frontends[num],
503                                  THIS_MODULE, NULL,
504                                  &ndev->pci_dev->dev, NULL, adapter_nr, 1)) {
505                 dev_dbg(&ndev->pci_dev->dev,
506                         "%s(): unable to register DVB bus %d\n",
507                         __func__, num);
508                 goto frontend_detach;
509         }
510         dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
511         return 0;
512 frontend_detach:
513         vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
514         return -EINVAL;
515 }
516
/*
 * netup_unidvb_dvb_fini - unregister DVB adapter @num
 * @ndev:	device context
 * @num:	adapter index, 0 or 1 (anything else is rejected)
 */
static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
{
	if (num < 0 || num > 1) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): unable to unregister DVB bus %d\n",
			__func__, num);
		return;
	}
	vb2_dvb_unregister_bus(&ndev->frontends[num]);
	dev_info(&ndev->pci_dev->dev,
		"%s(): DVB bus %d unregistered\n", __func__, num);
}
529
/*
 * netup_unidvb_dvb_setup - register both DVB adapters
 * @ndev: device context
 *
 * On failure of the second adapter the first is torn down again so the
 * caller sees an all-or-nothing result.
 */
static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int ret;

	ret = netup_unidvb_dvb_init(ndev, 0);
	if (ret)
		return ret;
	ret = netup_unidvb_dvb_init(ndev, 1);
	if (ret)
		netup_unidvb_dvb_fini(ndev, 0);
	return ret;
}
544
/*
 * netup_unidvb_ring_copy - copy DMA ring data into a vb2 buffer
 * @dma:	DMA channel (called with dma->lock held by the worker)
 * @buf:	destination buffer; buf->size tracks how full it already is
 *
 * Copies at most the free space remaining in @buf.  The copy happens
 * in up to two chunks to handle wrap-around of the ring buffer.
 *
 * Return: 0 on success, -EINVAL if the vb2 plane has no virtual address.
 */
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	/* first chunk: from data_offset up to the end of the ring */
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	/* second chunk (or the only one when no wrap occurred) */
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
				buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset), copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}
585
/*
 * netup_unidvb_dma_worker - drain the DMA ring into queued buffers
 * @work: embedded work_struct of a struct netup_dma
 *
 * Scheduled from netup_dma_interrupt().  Fills buffers from the
 * free_buffers list until the pending data is consumed or no buffers
 * remain; completely filled buffers are completed back to vb2.
 */
static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			/* buffer is full: complete it and hand to userspace */
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			buf->vb.vb2_buf.timestamp = ktime_get_ns();
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	/* any unconsumed remainder is dropped; next IRQ restarts accounting */
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}
629
630 static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
631 {
632         struct netup_unidvb_buffer *buf;
633         unsigned long flags;
634
635         spin_lock_irqsave(&dma->lock, flags);
636         while (!list_empty(&dma->free_buffers)) {
637                 buf = list_first_entry(&dma->free_buffers,
638                         struct netup_unidvb_buffer, list);
639                 list_del(&buf->list);
640                 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
641         }
642         spin_unlock_irqrestore(&dma->lock, flags);
643 }
644
/*
 * netup_unidvb_dma_timeout - DMA watchdog timer callback
 * @data: struct netup_dma pointer cast to unsigned long
 *
 * Armed with a one-second delay each time a buffer is queued (see
 * netup_unidvb_buf_queue()).  When it fires, all pending buffers are
 * returned to userspace with an error state.
 */
static void netup_unidvb_dma_timeout(unsigned long data)
{
	struct netup_dma *dma = (struct netup_dma *)data;
	struct netup_unidvb_dev *ndev = dma->ndev;

	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_queue_cleanup(dma);
}
653
/*
 * netup_unidvb_dma_init - initialize DMA channel @num
 * @ndev:	device context
 * @num:	channel index, 0 or 1
 *
 * Splits the preallocated DMA area evenly between the two channels,
 * sets up the worker, watchdog timer and ring-buffer bookkeeping, and
 * programs the channel's hardware registers.
 *
 * Return: 0 on success, -ENODEV for an invalid channel index.
 */
static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	dma->timeout.function = netup_unidvb_dma_timeout;
	dma->timeout.data = (unsigned long)dma;
	init_timer(&dma->timeout);
	/* each channel owns half of the common DMA area */
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	/* top address bits are programmed via a separate window register */
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
	/* ring geometry: block count, packets per block, TS packet size */
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	/* DMA timeout in 8 ns units: 375000000 * 8 ns = 3 s */
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}
698
699 static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
700 {
701         struct netup_dma *dma;
702
703         if (num < 0 || num > 1)
704                 return;
705         dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
706         dma = &ndev->dma[num];
707         netup_unidvb_dma_enable(dma, 0);
708         msleep(50);
709         cancel_work_sync(&dma->work);
710         del_timer(&dma->timeout);
711 }
712
713 static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
714 {
715         int res;
716
717         res = netup_unidvb_dma_init(ndev, 0);
718         if (res)
719                 return res;
720         res = netup_unidvb_dma_init(ndev, 1);
721         if (res) {
722                 netup_unidvb_dma_fini(ndev, 0);
723                 return res;
724         }
725         netup_unidvb_dma_enable(&ndev->dma[0], 0);
726         netup_unidvb_dma_enable(&ndev->dma[1], 0);
727         return 0;
728 }
729
730 static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
731                                  struct pci_dev *pci_dev)
732 {
733         int res;
734
735         writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
736         res = netup_unidvb_ci_register(ndev, 0, pci_dev);
737         if (res)
738                 return res;
739         res = netup_unidvb_ci_register(ndev, 1, pci_dev);
740         if (res)
741                 netup_unidvb_ci_unregister(ndev, 0);
742         return res;
743 }
744
745 static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
746 {
747         if (!request_mem_region(pci_resource_start(pci_dev, 0),
748                         pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
749                 dev_err(&pci_dev->dev,
750                         "%s(): unable to request MMIO bar 0 at 0x%llx\n",
751                         __func__,
752                         (unsigned long long)pci_resource_start(pci_dev, 0));
753                 return -EBUSY;
754         }
755         if (!request_mem_region(pci_resource_start(pci_dev, 1),
756                         pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
757                 dev_err(&pci_dev->dev,
758                         "%s(): unable to request MMIO bar 1 at 0x%llx\n",
759                         __func__,
760                         (unsigned long long)pci_resource_start(pci_dev, 1));
761                 release_mem_region(pci_resource_start(pci_dev, 0),
762                         pci_resource_len(pci_dev, 0));
763                 return -EBUSY;
764         }
765         return 0;
766 }
767
768 static int netup_unidvb_request_modules(struct device *dev)
769 {
770         static const char * const modules[] = {
771                 "lnbh25", "ascot2e", "horus3a", "cxd2841er", "helene", NULL
772         };
773         const char * const *curr_mod = modules;
774         int err;
775
776         while (*curr_mod != NULL) {
777                 err = request_module(*curr_mod);
778                 if (err) {
779                         dev_warn(dev, "request_module(%s) failed: %d\n",
780                                 *curr_mod, err);
781                 }
782                 ++curr_mod;
783         }
784         return 0;
785 }
786
787 static int netup_unidvb_initdev(struct pci_dev *pci_dev,
788                                 const struct pci_device_id *pci_id)
789 {
790         u8 board_revision;
791         u16 board_vendor;
792         struct netup_unidvb_dev *ndev;
793         int old_firmware = 0;
794
795         netup_unidvb_request_modules(&pci_dev->dev);
796
797         /* Check card revision */
798         if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
799                 dev_err(&pci_dev->dev,
800                         "netup_unidvb: expected card revision %d, got %d\n",
801                         NETUP_PCI_DEV_REVISION, pci_dev->revision);
802                 dev_err(&pci_dev->dev,
803                         "Please upgrade firmware!\n");
804                 dev_err(&pci_dev->dev,
805                         "Instructions on http://www.netup.tv\n");
806                 old_firmware = 1;
807                 spi_enable = 1;
808         }
809
810         /* allocate device context */
811         ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
812         if (!ndev)
813                 goto dev_alloc_err;
814
815         /* detect hardware revision */
816         if (pci_dev->device == NETUP_HW_REV_1_3)
817                 ndev->rev = NETUP_HW_REV_1_3;
818         else
819                 ndev->rev = NETUP_HW_REV_1_4;
820
821         dev_info(&pci_dev->dev,
822                 "%s(): board (0x%x) hardware revision 0x%x\n",
823                 __func__, pci_dev->device, ndev->rev);
824
825         ndev->old_fw = old_firmware;
826         ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
827         if (!ndev->wq) {
828                 dev_err(&pci_dev->dev,
829                         "%s(): unable to create workqueue\n", __func__);
830                 goto wq_create_err;
831         }
832         ndev->pci_dev = pci_dev;
833         ndev->pci_bus = pci_dev->bus->number;
834         ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
835         ndev->pci_func = PCI_FUNC(pci_dev->devfn);
836         ndev->board_num = ndev->pci_bus*10 + ndev->pci_slot;
837         pci_set_drvdata(pci_dev, ndev);
838         /* PCI init */
839         dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
840                 __func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);
841
842         if (pci_enable_device(pci_dev)) {
843                 dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
844                         __func__);
845                 goto pci_enable_err;
846         }
847         /* read PCI info */
848         pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
849         pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
850         if (board_vendor != NETUP_VENDOR_ID) {
851                 dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x",
852                         __func__, board_vendor);
853                 goto pci_detect_err;
854         }
855         dev_info(&pci_dev->dev,
856                 "%s(): board vendor 0x%x, revision 0x%x\n",
857                 __func__, board_vendor, board_revision);
858         pci_set_master(pci_dev);
859         if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
860                 dev_err(&pci_dev->dev,
861                         "%s(): 32bit PCI DMA is not supported\n", __func__);
862                 goto pci_detect_err;
863         }
864         dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
865         /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
866         pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
867                 PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
868                 PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
869         /* Adjust PCIe completion timeout. */
870         pcie_capability_clear_and_set_word(pci_dev,
871                 PCI_EXP_DEVCTL2, 0xf, 0x2);
872
873         if (netup_unidvb_request_mmio(pci_dev)) {
874                 dev_err(&pci_dev->dev,
875                         "%s(): unable to request MMIO regions\n", __func__);
876                 goto pci_detect_err;
877         }
878         ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
879                 pci_resource_len(pci_dev, 0));
880         if (!ndev->lmmio0) {
881                 dev_err(&pci_dev->dev,
882                         "%s(): unable to remap MMIO bar 0\n", __func__);
883                 goto pci_bar0_error;
884         }
885         ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
886                 pci_resource_len(pci_dev, 1));
887         if (!ndev->lmmio1) {
888                 dev_err(&pci_dev->dev,
889                         "%s(): unable to remap MMIO bar 1\n", __func__);
890                 goto pci_bar1_error;
891         }
892         ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
893         ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
894         dev_info(&pci_dev->dev,
895                 "%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
896                 __func__,
897                 ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
898                 ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
899                 pci_dev->irq);
900         if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
901                         "netup_unidvb", pci_dev) < 0) {
902                 dev_err(&pci_dev->dev,
903                         "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
904                 goto irq_request_err;
905         }
906         ndev->dma_size = 2 * 188 *
907                 NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
908         ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
909                 ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
910         if (!ndev->dma_virt) {
911                 dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
912                         __func__);
913                 goto dma_alloc_err;
914         }
915         netup_unidvb_dev_enable(ndev);
916         if (spi_enable && netup_spi_init(ndev)) {
917                 dev_warn(&pci_dev->dev,
918                         "netup_unidvb: SPI flash setup failed\n");
919                 goto spi_setup_err;
920         }
921         if (old_firmware) {
922                 dev_err(&pci_dev->dev,
923                         "netup_unidvb: card initialization was incomplete\n");
924                 return 0;
925         }
926         if (netup_i2c_register(ndev)) {
927                 dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
928                 goto i2c_setup_err;
929         }
930         /* enable I2C IRQs */
931         writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
932                 ndev->bmmio0 + REG_IMASK_SET);
933         usleep_range(5000, 10000);
934         if (netup_unidvb_dvb_setup(ndev)) {
935                 dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
936                 goto dvb_setup_err;
937         }
938         if (netup_unidvb_ci_setup(ndev, pci_dev)) {
939                 dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
940                 goto ci_setup_err;
941         }
942         if (netup_unidvb_dma_setup(ndev)) {
943                 dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
944                 goto dma_setup_err;
945         }
946         dev_info(&pci_dev->dev,
947                 "netup_unidvb: device has been initialized\n");
948         return 0;
949 dma_setup_err:
950         netup_unidvb_ci_unregister(ndev, 0);
951         netup_unidvb_ci_unregister(ndev, 1);
952 ci_setup_err:
953         netup_unidvb_dvb_fini(ndev, 0);
954         netup_unidvb_dvb_fini(ndev, 1);
955 dvb_setup_err:
956         netup_i2c_unregister(ndev);
957 i2c_setup_err:
958         if (ndev->spi)
959                 netup_spi_release(ndev);
960 spi_setup_err:
961         dma_free_coherent(&pci_dev->dev, ndev->dma_size,
962                         ndev->dma_virt, ndev->dma_phys);
963 dma_alloc_err:
964         free_irq(pci_dev->irq, pci_dev);
965 irq_request_err:
966         iounmap(ndev->lmmio1);
967 pci_bar1_error:
968         iounmap(ndev->lmmio0);
969 pci_bar0_error:
970         release_mem_region(pci_resource_start(pci_dev, 0),
971                 pci_resource_len(pci_dev, 0));
972         release_mem_region(pci_resource_start(pci_dev, 1),
973                 pci_resource_len(pci_dev, 1));
974 pci_detect_err:
975         pci_disable_device(pci_dev);
976 pci_enable_err:
977         pci_set_drvdata(pci_dev, NULL);
978         destroy_workqueue(ndev->wq);
979 wq_create_err:
980         kfree(ndev);
981 dev_alloc_err:
982         dev_err(&pci_dev->dev,
983                 "%s(): failed to initialize device\n", __func__);
984         return -EIO;
985 }
986
/*
 * netup_unidvb_finidev - PCI remove callback.
 *
 * Tears the device down in roughly the reverse of probe order:
 * sub-devices (DMA, CI, DVB, I2C) first, then SPI, interrupts, the
 * coherent DMA buffer, MMIO mappings/regions, PCI state, and finally
 * the workqueue and the device context itself.
 */
static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	/*
	 * With old firmware, probe skipped DMA/CI/DVB/I2C setup entirely
	 * (only SPI was brought up), so skip their teardown here too.
	 */
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	/* Mask every interrupt source before the handler goes away. */
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}
1020
1021
/* PCI IDs served by this driver (vendor 0x1b55); zero entry terminates. */
static struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) }, /* hw rev. 1.3 */
	{ PCI_DEVICE(0x1b55, 0x18f7) }, /* hw rev. 1.4 */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);
1028
1029 static struct pci_driver netup_unidvb_pci_driver = {
1030         .name     = "netup_unidvb",
1031         .id_table = netup_unidvb_pci_tbl,
1032         .probe    = netup_unidvb_initdev,
1033         .remove   = netup_unidvb_finidev,
1034         .suspend  = NULL,
1035         .resume   = NULL,
1036 };
1037
/*
 * module_pci_driver() expands to exactly the boilerplate this used to
 * spell out by hand: an __init function calling pci_register_driver(),
 * an __exit function calling pci_unregister_driver(), and the
 * module_init()/module_exit() hookups.
 */
module_pci_driver(netup_unidvb_pci_driver);