2 * Driver for PLX NET2272 USB device controller
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/errno.h>
25 #include <linux/gpio.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
29 #include <linux/ioport.h>
30 #include <linux/kernel.h>
31 #include <linux/list.h>
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/pci.h>
35 #include <linux/platform_device.h>
36 #include <linux/prefetch.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/timer.h>
40 #include <linux/usb.h>
41 #include <linux/usb/ch9.h>
42 #include <linux/usb/gadget.h>
44 #include <asm/byteorder.h>
45 #include <asm/unaligned.h>
#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"

/* identification strings reported in kernel logs and sysfs */
static const char driver_name[] = "net2272";
static const char driver_vers[] = "2006 October 17/mainline";
static const char driver_desc[] = DRIVER_DESC;

/* ep0 gets its own name; data endpoints are named by fifo letter */
static const char ep0name[] = "ep0";
/* NOTE(review): the "ep0" entry and the closing "};" of this initializer
 * appear to be missing from this dump — confirm against upstream.
 */
static const char * const ep_name[] = {
	"ep-a", "ep-b", "ep-c",
#ifdef CONFIG_USB_NET2272_DMA
/*
 * use_dma: the NET2272 can use an external DMA controller.
 * Note that since there is no generic DMA api, some functions,
 * notably request_dma, start_dma, and cancel_dma will need to be
 * modified for your platform's particular dma controller.
 *
 * If use_dma is disabled, pio will be used instead.
 *
 * NOTE(review): the matching #else/#endif for CONFIG_USB_NET2272_DMA is
 * not visible in this dump — confirm against upstream.
 */
static bool use_dma = 0;
module_param(use_dma, bool, 0644);

/*
 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
 * The NET2272 can only use dma for a single endpoint at a time.
 * At some point this could be modified to allow either endpoint
 * to take control of dma as it becomes available.
 *
 * Note that DMA should not be used on OUT endpoints unless it can
 * be guaranteed that no short packets will arrive on an IN endpoint
 * while the DMA operation is pending.  Otherwise the OUT DMA will
 * terminate prematurely (See NET2272 Errata 630-0213-0101)
 */
static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);

/*
 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
 *	mode 0 == Slow DREQ mode
 *	mode 1 == Fast DREQ mode
 *	mode 2 == Burst mode
 */
static ushort dma_mode = 2;
module_param(dma_mode, ushort, 0644);

/*
 * fifo_mode: net2272 buffer configuration:
 *	mode 0 == ep-{a,b,c} 512db each
 *	mode 1 == ep-a 1k, ep-{b,c} 512db
 *	mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
 *	mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
 */
static ushort fifo_mode = 0;
module_param(fifo_mode, ushort, 0644);

/*
 * enable_suspend: When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2272.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.  For bus powered devices set this to 1.
 */
static ushort enable_suspend = 0;
module_param(enable_suspend, ushort, 0644);
120 static void assert_out_naking(struct net2272_ep *ep, const char *where)
128 tmp = net2272_ep_read(ep, EP_STAT0);
129 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
130 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
131 ep->ep.name, where, tmp);
132 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
135 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
137 static void stop_out_naking(struct net2272_ep *ep)
139 u8 tmp = net2272_ep_read(ep, EP_STAT0);
141 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
142 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
145 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
147 static char *type_string(u8 bmAttributes)
149 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
150 case USB_ENDPOINT_XFER_BULK: return "bulk";
151 case USB_ENDPOINT_XFER_ISOC: return "iso";
152 case USB_ENDPOINT_XFER_INT: return "intr";
153 default: return "control";
157 static char *buf_state_string(unsigned state)
160 case BUFF_FREE: return "free";
161 case BUFF_VALID: return "valid";
162 case BUFF_LCL: return "local";
163 case BUFF_USB: return "usb";
164 default: return "unknown";
168 static char *dma_mode_string(void)
173 case 0: return "SLOW DREQ";
174 case 1: return "FAST DREQ";
175 case 2: return "BURST";
176 default: return "invalid";
/* forward declarations for routines referenced before their definitions */
static void net2272_dequeue_all(struct net2272_ep *);
static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
static int net2272_fifo_status(struct usb_ep *);

static struct usb_ep_ops net2272_ep_ops;
/*---------------------------------------------------------------------------*/

/* usb_ep_ops.enable: configure and activate a hardware endpoint from its
 * USB endpoint descriptor (maxpacket, type, number, direction), then
 * enable its interrupts.
 * NOTE(review): this dump is missing lines (return-type line, local
 * declarations for dev/tmp/max/flags, error returns and several closing
 * braces) — compare against upstream net2272.c before relying on it.
 */
net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
	struct net2272_ep *ep;

	ep = container_of(_ep, struct net2272_ep, ep);
	/* reject: bad args, already-configured ep, ep0, or wrong descriptor */
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
	/* refuse until a gadget driver is bound and bus speed is known */
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)

	max = usb_endpoint_maxp(desc) & 0x1fff;

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max & 0x7fff;

	/* net2272_ep_reset() has already been called */

	/* set speed-dependent max packet */
	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);

	/* set type, direction, address; reset fifo counters */
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
	tmp = usb_endpoint_type(desc);
	if (usb_endpoint_xfer_bulk(desc)) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
	tmp <<= ENDPOINT_TYPE;
	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
	tmp |= (1 << ENDPOINT_ENABLE);

	/* for OUT transfers, block the rx fifo until a read is posted */
	ep->is_in = usb_endpoint_dir_in(desc);
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	net2272_ep_write(ep, EP_CFG, tmp);

	/* enable this endpoint's bit in the main interrupt-enable register */
	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
	net2272_write(dev, IRQENB0, tmp);

	/* per-endpoint irqs: packet received and packet transmitted */
	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB);
	net2272_ep_write(ep, EP_IRQENB, tmp);

	tmp = desc->bEndpointAddress;
	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
		type_string(desc->bmAttributes), max,
		net2272_ep_read(ep, EP_CFG));

	spin_unlock_irqrestore(&dev->lock, flags);
/* Return an endpoint to its power-on defaults: empty queue, irqs off,
 * NAK OUT until a read is queued, and all stale status/fifo state scrubbed.
 * NOTE(review): opening brace, the declaration of tmp, an "if (!ep->dev
 * ...)"-style conditional before the TOGGLE|HALT line, and closing braces
 * appear missing from this dump — compare upstream.
 */
static void net2272_ep_reset(struct net2272_ep *ep)
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2272_ep_ops;

	/* disable irqs, endpoint */
	net2272_ep_write(ep, EP_IRQENB, 0);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read.
	 */
	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
	net2272_ep_write(ep, EP_RSPSET, tmp);

	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);

	net2272_ep_write(ep, EP_RSPCLR, tmp);

	/* scrub most status bits, and flush any fifo state */
	net2272_ep_write(ep, EP_STAT0,
		(1 << DATA_IN_TOKEN_INTERRUPT)
		| (1 << DATA_OUT_TOKEN_INTERRUPT)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));

	net2272_ep_write(ep, EP_STAT1,
		| (1 << USB_OUT_ACK_SENT)
		| (1 << USB_OUT_NAK_SENT)
		| (1 << USB_IN_ACK_RCVD)
		| (1 << USB_IN_NAK_SENT)
		| (1 << USB_STALL_SENT)
		| (1 << LOCAL_OUT_ZLP)
		| (1 << BUFFER_FLUSH));

	/* fifo size is handled separately */
/* usb_ep_ops.disable: flush any queued requests (completed -ESHUTDOWN via
 * net2272_dequeue_all) and return the endpoint hardware to reset defaults.
 * NOTE(review): opening brace, flags declaration, the error return for the
 * sanity check, and final return/closing brace are missing from this dump.
 */
static int net2272_disable(struct usb_ep *_ep)
	struct net2272_ep *ep;

	ep = container_of(_ep, struct net2272_ep, ep);
	/* ep0 may never be disabled */
	if (!_ep || !ep->desc || _ep->name == ep0name)

	spin_lock_irqsave(&ep->dev->lock, flags);
	net2272_dequeue_all(ep);
	net2272_ep_reset(ep);

	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);

	spin_unlock_irqrestore(&ep->dev->lock, flags);
/*---------------------------------------------------------------------------*/

/* usb_ep_ops.alloc_request: allocate a zeroed net2272_request wrapper and
 * return the embedded usb_request.
 * NOTE(review): the NULL-argument check, kzalloc failure check, "return
 * &req->req;" and braces are missing from this dump.
 */
static struct usb_request *
net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
	struct net2272_ep *ep;
	struct net2272_request *req;

	ep = container_of(_ep, struct net2272_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);

	INIT_LIST_HEAD(&req->queue);
/* usb_ep_ops.free_request: release a request previously allocated by
 * net2272_alloc_request; warns if it is still queued.
 * NOTE(review): return-type line, braces and the kfree(req) call are
 * missing from this dump.
 */
net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
	struct net2272_ep *ep;
	struct net2272_request *req;

	ep = container_of(_ep, struct net2272_ep, ep);

	req = container_of(_req, struct net2272_request, req);
	WARN_ON(!list_empty(&req->queue));
/* Retire a request: unlink it, record final status, undo any DMA mapping,
 * then call the gadget driver's completion callback with the device lock
 * dropped (and ep->stopped held high across the callback).
 * NOTE(review): braces, the dev local, the if/else pairing around the two
 * status assignments and the unmap arguments are incomplete in this dump.
 */
net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
	unsigned stopped = ep->stopped;

	if (ep->dev->protocol_stall) {

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
		status = req->req.status;

	if (use_dma && ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req,

	if (status && status != -ESHUTDOWN)
		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length, req->req.buf);

	/* don't modify queue heads during completion callback */
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
/* Copy up to @max bytes of the request's remaining payload into the
 * endpoint fifo, 16 bits at a time; an odd trailing byte is written by
 * temporarily dropping the chip to 8-bit local-bus mode.
 * NOTE(review): return-type line, the bufp/tmp declarations,
 * set_fifo_bytecount call, loop close and "return length" are missing
 * from this dump.
 */
net2272_write_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned max)
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	unsigned length, count;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
		ep->ep.name, req, max, length,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	while (likely(count >= 2)) {
		/* no byte-swap required; chip endian set during init */
		writew(*bufp++, ep_data);

	/* write final byte by placing the NET2272 into 8-bit mode */
	if (unlikely(count)) {
		tmp = net2272_read(ep->dev, LOCCTL);
		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
		writeb(*buf, ep_data);
		net2272_write(ep->dev, LOCCTL, tmp);
/* returns: 0: still running, 1: completed, negative: errno */
/* Keep loading packets into the IN fifo until the request completes or
 * the fifo fills; on completion, validates a short/zero packet and starts
 * the next queued request (DMA if possible).
 * NOTE(review): return-type line, locals (buf/count/max/status), several
 * braces/returns and the dummy EP_STAT0 read context are missing here.
 */
net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

	/*
	 * Keep loading the endpoint until the final packet is loaded,
	 * or the endpoint buffer is full.
	 */
	/*
	 * Clear interrupt status
	 *  - Packet Transmitted interrupt will become set again when the
	 *    host successfully takes another packet
	 */
	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
		buf = req->req.buf + req->req.actual;

		/* force fifo status to update (see errata workaround) */
		net2272_ep_read(ep, EP_STAT0);

		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
			(net2272_ep_read(ep, EP_AVAIL0));

		if (max < ep->ep.maxpacket)
			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
				| (net2272_ep_read(ep, EP_AVAIL0));

		count = net2272_write_packet(ep, buf, req, max);
		/* see if we are done */
		if (req->req.length == req->req.actual) {
			/* validate short or zlp packet */
			if (count < ep->ep.maxpacket)
				set_fifo_bytecount(ep, 0);
			net2272_done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct net2272_request,
				status = net2272_kick_dma(ep, req);

					if ((net2272_ep_read(ep, EP_STAT0)
							& (1 << BUFFER_EMPTY)))

	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
501 net2272_out_flush(struct net2272_ep *ep)
503 ASSERT_OUT_NAKING(ep);
505 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
506 | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
507 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
/* Drain up to @avail bytes from the OUT fifo into the request buffer,
 * 16 bits at a time; returns whether this was a short packet.
 * NOTE(review): return-type line, bufp/is_short declarations, the
 * odd-length rounding, the read loop header, stop_out_naking/return and
 * braces are missing from this dump.
 */
net2272_read_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned avail)
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);

	req->req.actual += avail;

	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
		ep->ep.name, req, avail,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	is_short = (avail < ep->ep.maxpacket);

	if (unlikely(avail == 0)) {
		/* remove any zlp from the buffer */
		(void)readw(ep_data);

	/* Ensure we get the final byte */
	if (unlikely(avail % 2))
		*bufp++ = readw(ep_data);

	/*
	 * To avoid false endpoint available race condition must read
	 * ep stat0 twice in the case of a short transfer
	 */
	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
		net2272_ep_read(ep, EP_STAT0);
/* Drain the OUT fifo into the current request, completing it on short
 * packet, overflow (-EOVERFLOW after flushing), or full length; then kick
 * the next queued request.
 * NOTE(review): return-type line, locals (buf/count/tmp/cleanup/is_short/
 * status), the top-of-loop label/do, overflow branch structure and several
 * braces/returns are missing from this dump.
 */
net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

		buf = req->req.buf + req->req.actual;

		/* bytes currently waiting in the fifo */
		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
			| net2272_ep_read(ep, EP_AVAIL0);

		net2272_ep_write(ep, EP_STAT0,
			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
			(1 << DATA_PACKET_RECEIVED_INTERRUPT));

		tmp = req->req.length - req->req.actual;

			/* buffer overflow: fifo holds more than the request wants */
			if ((tmp % ep->ep.maxpacket) != 0) {
				dev_err(ep->dev->dev,
					"%s out fifo %d bytes, expected %d\n",
					ep->ep.name, count, tmp);
			count = (tmp > 0) ? tmp : 0;

		is_short = net2272_read_packet(ep, buf, req, count);

		/* completion */
		if (unlikely(cleanup || is_short ||
				((req->req.actual == req->req.length)
				 && !req->req.zero))) {

				net2272_out_flush(ep);
				net2272_done(ep, req, -EOVERFLOW);
				net2272_done(ep, req, 0);

			/* re-initialize endpoint transfer registers
			 * otherwise they may result in erroneous pre-validation
			 * for subsequent control reads
			 */
			if (unlikely(ep->num == 0)) {
				net2272_ep_write(ep, EP_TRANSFER2, 0);
				net2272_ep_write(ep, EP_TRANSFER1, 0);
				net2272_ep_write(ep, EP_TRANSFER0, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct net2272_request, queue);
				status = net2272_kick_dma(ep, req);
					!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))

	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
628 net2272_pio_advance(struct net2272_ep *ep)
630 struct net2272_request *req;
632 if (unlikely(list_empty(&ep->queue)))
635 req = list_entry(ep->queue.next, struct net2272_request, queue);
636 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
/* returns 0 on success, else negative errno */
/* Program the platform DMA engine (PLX 9054 on RDK1 boards) and the
 * NET2272 DMAREQ register for a transfer on endpoint @ep.
 * NOTE(review): return-type line, the dma_busy single-channel check, the
 * error returns, the RDK2/default switch arms and closing braces are
 * missing from this dump.
 */
net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
	unsigned len, unsigned dir)
	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",

	/* The NET2272 only supports a single dma channel */

	/*
	 * EP_TRANSFER (used to determine the number of bytes received
	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
	 */
	if ((dir == 1) && (len > 0x1000000))

	/* initialize platform's dma */
	/* NET2272 addr, buffer addr, length, etc. */
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		/* Setup PLX 9054 DMA mode */
		writel((1 << LOCAL_BUS_WIDTH) |
			(1 << TA_READY_INPUT_ENABLE) |
			(0 << LOCAL_BURST_ENABLE) |
			(1 << DONE_INTERRUPT_ENABLE) |
			(1 << LOCAL_ADDRESSING_MODE) |
			(1 << DMA_EOT_ENABLE) |
			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
			dev->rdk1.plx9054_base_addr + DMAMODE0);

		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
		writel((dir << DIRECTION_OF_TRANSFER) |
			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
			dev->rdk1.plx9054_base_addr + DMADPR0);
		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
			readl(dev->rdk1.plx9054_base_addr + INTCSR),
			dev->rdk1.plx9054_base_addr + INTCSR);

	/* arm the NET2272 side: DACK mode, polarities, endpoint select */
	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(1 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((ep >> 1) << DMA_ENDPOINT_SELECT));

	/* dummy read to flush posted writes */
	(void) net2272_read(dev, SCRATCH);
/* Kick off the previously-programmed platform DMA transfer (enable and
 * start channel 0 of the PLX 9054 on RDK1 boards).
 * NOTE(review): return-type line, braces, the break and the remaining
 * switch arms are missing from this dump.
 */
net2272_start_dma(struct net2272 *dev)
	/* start platform's dma controller */
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
			dev->rdk1.plx9054_base_addr + DMACSR0);
/* returns 0 on success, else negative errno */
/* Try to hand the request to DMA: only even-length transfers on ep-a/ep-b
 * with the single DMA channel free qualify; otherwise the caller falls
 * back to PIO.
 * NOTE(review): return-type line, size/tmp declarations, the error
 * returns, the in/out if-else structure, dma_busy set, and closing braces
 * are missing from this dump.
 */
net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)

	/* don't use dma for odd-length transfers
	 * otherwise, we'd need to deal with the last byte with pio
	 */
	if (req->req.length & 1)

	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
		ep->ep.name, req, (unsigned long long) req->req.dma);

	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	/* The NET2272 can only use DMA on one endpoint at a time */
	if (ep->dev->dma_busy)

	/* Make sure we only DMA an even number of bytes (we'll use
	 * pio to complete the transfer)
	 */
	size = req->req.length;

	/* device-to-host transfer */
		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
			/* unable to obtain DMA channel; return error and use pio mode */
		req->req.actual += size;

	/* host-to-device transfer */
		tmp = net2272_ep_read(ep, EP_STAT0);

		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
			/* unable to obtain DMA channel; return error and use pio mode */

		if (!(tmp & (1 << BUFFER_EMPTY)))

		/* allow the endpoint's buffer to fill */
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);

		/* this transfer completed and data's already in the fifo
		 * return error so pio gets used.
		 */
		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {

			/* deassert dreq */
			net2272_write(ep->dev, DMAREQ,
				(0 << DMA_BUFFER_VALID) |
				(0 << DMA_REQUEST_ENABLE) |
				(1 << DMA_CONTROL_DACK) |
				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
				((ep->num >> 1) << DMA_ENDPOINT_SELECT));

	/* Don't use per-packet interrupts: use dma interrupts only */
	net2272_ep_write(ep, EP_IRQENB, 0);

	net2272_start_dma(ep->dev);
/* Abort any in-flight platform DMA (RDK1: abort channel 0, busy-wait for
 * CHANNEL_DONE, then clear the abort-generated interrupt).
 * NOTE(review): opening brace, the remaining switch arms/breaks, the
 * dma_busy clear and closing braces are missing from this dump.
 */
static void net2272_cancel_dma(struct net2272 *dev)
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
		         (1 << CHANNEL_DONE)))
			continue;	/* wait for dma to stabilize */

		/* dma abort generates an interrupt */
		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
			dev->rdk1.plx9054_base_addr + DMACSR0);
/*---------------------------------------------------------------------------*/

/* usb_ep_ops.queue: validate and enqueue a request; if the endpoint is
 * idle, immediately start it (ep0 status ack, blocking-ZLP drain, DMA
 * kick, or PIO fifo write/read as appropriate).
 * NOTE(review): return-type line, locals (dev/status/flags/s), error
 * returns, several braces and the final "return 0" are missing from this
 * dump.
 */
net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
	struct net2272_request *req;
	struct net2272_ep *ep;

	req = container_of(_req, struct net2272_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))
	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)

	/* set up dma mapping in case the caller didn't */
	if (use_dma && ep->dma) {
		status = usb_gadget_map_request(&dev->gadget, _req,

	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
		_ep->name, _req, _req->length, _req->buf,
		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		/* maybe there's no control data, just status ack */
		if (ep->num == 0 && _req->length == 0) {
			net2272_done(ep, req, 0);
			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);

			/* Return zlp, don't let it block subsequent packets */
			s = net2272_ep_read(ep, EP_STAT0);
			if (s & (1 << BUFFER_EMPTY)) {
				/* Buffer is empty check for a blocking zlp, handle it */
				if ((s & (1 << NAK_OUT_PACKETS)) &&
				    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
					dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
					/*
					 * Request is going to terminate with a short packet ...
					 * hope the client is ready for it!
					 */
					status = net2272_read_fifo(ep, req);
					/* clear short packet naking */
					net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));

			status = net2272_kick_dma(ep, req);

				/* dma failed (most likely in use by another endpoint)

				status = net2272_write_fifo(ep, req);

				s = net2272_ep_read(ep, EP_STAT0);
				if ((s & (1 << BUFFER_EMPTY)) == 0)
					status = net2272_read_fifo(ep, req);

		if (unlikely(status != 0)) {

		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue)))
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);

	spin_unlock_irqrestore(&dev->lock, flags);
/* dequeue ALL requests */
/* Complete every queued request with -ESHUTDOWN.  Called with the device
 * spinlock held (e.g. from disable / driver unbind paths).
 * NOTE(review): return-type line, the ep->stopped set, the queue arg of
 * list_entry and closing braces are missing from this dump.
 */
net2272_dequeue_all(struct net2272_ep *ep)
	struct net2272_request *req;

	/* called with spinlock held */

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2272_request,
		net2272_done(ep, req, -ESHUTDOWN);
/* dequeue JUST ONE request */
/* usb_ep_ops.dequeue: find @_req on the endpoint's queue and complete it
 * with -ECONNRESET; returns -EINVAL if it is not queued there.
 * NOTE(review): return-type line, flags/stopped declarations, the
 * ep->stopped = 1 before the search, error returns and the final
 * "return 0" are missing from this dump.
 */
net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
	struct net2272_ep *ep;
	struct net2272_request *req;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
	if (&req->req != _req) {
		ep->stopped = stopped;
		spin_unlock_irqrestore(&ep->dev->lock, flags);

	/* queue head may be partially complete */
	if (ep->queue.next == &req->queue) {
		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
		net2272_done(ep, req, -ECONNRESET);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->dev->lock, flags);
/*---------------------------------------------------------------------------*/

/* Common implementation behind set_halt and set_wedge: stall or clear a
 * stall on the endpoint; refuses while requests are queued or an IN fifo
 * still holds data.
 * NOTE(review): return-type line, ret declaration, error returns, the
 * actual halt/clear register writes and the final return are missing from
 * this dump.
 */
net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
	struct net2272_ep *ep;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
	/* iso endpoints cannot halt */
	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)

		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
			value ? "set" : "clear",
			wedged ? "wedge" : "halt");

			ep->dev->protocol_stall = 1;

	spin_unlock_irqrestore(&ep->dev->lock, flags);
1027 net2272_set_halt(struct usb_ep *_ep, int value)
1029 return net2272_set_halt_and_wedge(_ep, value, 0);
1033 net2272_set_wedge(struct usb_ep *_ep)
1035 if (!_ep || _ep->name == ep0name)
1037 return net2272_set_halt_and_wedge(_ep, 1, 1);
/* usb_ep_ops.fifo_status: report how many bytes are held in the endpoint
 * fifo, computed from the 16-bit EP_AVAIL register pair.
 * NOTE(review): return-type line, avail declaration, error returns and
 * the final "return avail" are missing from this dump; the
 * "avail > fifo_size" branch body is also absent.
 */
net2272_fifo_status(struct usb_ep *_ep)
	struct net2272_ep *ep;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)

	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
	avail |= net2272_ep_read(ep, EP_AVAIL0);
	if (avail > ep->fifo_size)
	/* for IN endpoints EP_AVAIL counts free space, so invert */
		avail = ep->fifo_size - avail;
1062 net2272_fifo_flush(struct usb_ep *_ep)
1064 struct net2272_ep *ep;
1066 ep = container_of(_ep, struct net2272_ep, ep);
1067 if (!_ep || (!ep->desc && ep->num != 0))
1069 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1072 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1075 static struct usb_ep_ops net2272_ep_ops = {
1076 .enable = net2272_enable,
1077 .disable = net2272_disable,
1079 .alloc_request = net2272_alloc_request,
1080 .free_request = net2272_free_request,
1082 .queue = net2272_queue,
1083 .dequeue = net2272_dequeue,
1085 .set_halt = net2272_set_halt,
1086 .set_wedge = net2272_set_wedge,
1087 .fifo_status = net2272_fifo_status,
1088 .fifo_flush = net2272_fifo_flush,
1091 /*---------------------------------------------------------------------------*/
1094 net2272_get_frame(struct usb_gadget *_gadget)
1096 struct net2272 *dev;
1097 unsigned long flags;
1102 dev = container_of(_gadget, struct net2272, gadget);
1103 spin_lock_irqsave(&dev->lock, flags);
1105 ret = net2272_read(dev, FRAME1) << 8;
1106 ret |= net2272_read(dev, FRAME0);
1108 spin_unlock_irqrestore(&dev->lock, flags);
1113 net2272_wakeup(struct usb_gadget *_gadget)
1115 struct net2272 *dev;
1117 unsigned long flags;
1121 dev = container_of(_gadget, struct net2272, gadget);
1123 spin_lock_irqsave(&dev->lock, flags);
1124 tmp = net2272_read(dev, USBCTL0);
1125 if (tmp & (1 << IO_WAKEUP_ENABLE))
1126 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1128 spin_unlock_irqrestore(&dev->lock, flags);
1134 net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1139 _gadget->is_selfpowered = (value != 0);
1145 net2272_pullup(struct usb_gadget *_gadget, int is_on)
1147 struct net2272 *dev;
1149 unsigned long flags;
1153 dev = container_of(_gadget, struct net2272, gadget);
1155 spin_lock_irqsave(&dev->lock, flags);
1156 tmp = net2272_read(dev, USBCTL0);
1157 dev->softconnect = (is_on != 0);
1159 tmp |= (1 << USB_DETECT_ENABLE);
1161 tmp &= ~(1 << USB_DETECT_ENABLE);
1162 net2272_write(dev, USBCTL0, tmp);
1163 spin_unlock_irqrestore(&dev->lock, flags);
1168 static int net2272_start(struct usb_gadget *_gadget,
1169 struct usb_gadget_driver *driver);
1170 static int net2272_stop(struct usb_gadget *_gadget);
1172 static const struct usb_gadget_ops net2272_ops = {
1173 .get_frame = net2272_get_frame,
1174 .wakeup = net2272_wakeup,
1175 .set_selfpowered = net2272_set_selfpowered,
1176 .pullup = net2272_pullup,
1177 .udc_start = net2272_start,
1178 .udc_stop = net2272_stop,
/*---------------------------------------------------------------------------*/

/* sysfs "registers" attribute: dump main-control, DMA, USB-control and
 * per-endpoint register state into @buf for debugging.
 * NOTE(review): return-type line, locals (s/t/t1/t2/i/next/size/ep),
 * "next += t; size -= t;" bookkeeping between scnprintf calls, the
 * speed-string assignments, the ep pointer assignment in the loop, and
 * many braces are missing from this dump — compare upstream.
 */
registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
	struct net2272 *dev;
	unsigned long flags;

	dev = dev_get_drvdata(_dev);

	spin_lock_irqsave(&dev->lock, flags);

	s = dev->driver->driver.name;

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version %s,"
		"chiprev %02x, locctl %02x\n"
		"irqenb0 %02x irqenb1 %02x "
		"irqstat0 %02x irqstat1 %02x\n",
		driver_name, driver_vers, dev->chiprev,
		net2272_read(dev, LOCCTL),
		net2272_read(dev, IRQENB0),
		net2272_read(dev, IRQENB1),
		net2272_read(dev, IRQSTAT0),
		net2272_read(dev, IRQSTAT1));

	/* DMA request register */
	t1 = net2272_read(dev, DMAREQ);
	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
		t1, ep_name[(t1 & 0x01) + 1],
		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
		t1 & (1 << DMA_REQUEST) ? "req " : "",
		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");

	/* USB Control Registers */
	t1 = net2272_read(dev, USBCTL1);
	if (t1 & (1 << VBUS_PIN)) {
		if (t1 & (1 << USB_HIGH_SPEED))
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)

	t = scnprintf(next, size,
		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
		net2272_read(dev, USBCTL0), t1,
		net2272_read(dev, OURADDR), s);

	/* Endpoint Registers */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep;

		t1 = net2272_ep_read(ep, EP_CFG);
		t2 = net2272_ep_read(ep, EP_RSPSET);
		t = scnprintf(next, size,
			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
			ep->ep.name, t1, t2,
			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
			net2272_ep_read(ep, EP_IRQENB));

		t = scnprintf(next, size,
			"\tstat0 %02x stat1 %02x avail %04x "
			net2272_ep_read(ep, EP_STAT0),
			net2272_ep_read(ep, EP_STAT1),
			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
			ep->is_in ? "in" : "out",
			type_string(t1 >> 5),
			ep->stopped ? "*" : "");

		t = scnprintf(next, size,
			"\tep_transfer %06x\n",
			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));

		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
		t = scnprintf(next, size,
			"\tbuf-a %s buf-b %s\n",
			buf_state_string(t1),
			buf_state_string(t2));

	spin_unlock_irqrestore(&dev->lock, flags);

	return PAGE_SIZE - size;

static DEVICE_ATTR_RO(registers);
/*---------------------------------------------------------------------------*/

/* Program the fifo_mode bits of LOCCTL and rebuild the gadget's ep_list
 * with the endpoint/fifo-size combinations that mode provides (see the
 * fifo_mode module-parameter comment for the four layouts).
 * NOTE(review): return-type line, "tmp |= (mode << 6);", the switch
 * statement structure around the per-mode arms, breaks and closing braces
 * are missing from this dump — compare upstream.
 */
net2272_set_fifo_mode(struct net2272 *dev, int mode)
	tmp = net2272_read(dev, LOCCTL) & 0x3f;
	net2272_write(dev, LOCCTL, tmp);

	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* always ep-a, ep-c ... maybe not ep-b */
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);

	/* mode 0: ep-a and ep-b both 512 */
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
	/* mode 1: ep-a 1k, ep-b 512 */
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = 1024;
		dev->ep[2].fifo_size = 512;
	/* mode 2: ep-a and ep-b both 1k */
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
	/* mode 3: ep-a 1k, ep-b disabled */
		dev->ep[1].fifo_size = 1024;

	/* ep-c is always 2 512 byte buffers */
	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
	dev->ep[3].fifo_size = 512;
/*---------------------------------------------------------------------------*/

/* Return the controller to a known state: cancel DMA, mask and clear all
 * irqs (except a latched suspend request), park DMAREQ, apply the fifo
 * mode, and set 16-bit local-bus data width plus the DMA mode bits.
 * NOTE(review): return-type line, braces and any lines between the two
 * net2272_cancel_dma calls are missing from this dump.
 */
net2272_usb_reset(struct net2272 *dev)
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	net2272_cancel_dma(dev);

	net2272_write(dev, IRQENB0, 0);
	net2272_write(dev, IRQENB1, 0);

	/* clear irq state */
	net2272_write(dev, IRQSTAT0, 0xff);
	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));

	/* park the DMA request logic: valid off, request off, DACK mode */
	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(0 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));

	net2272_cancel_dma(dev);
	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);

	/* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping
	 * note that the higher level gadget drivers are expected to convert data to little endian.
	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here
	 */
	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
/* (Re)initialize software endpoint state: names, capabilities, DMA
 * assignment, default fifo sizes, and ep0 bookkeeping.
 */
1387 net2272_usb_reinit(struct net2272 *dev)
1391 /* basic endpoint init */
1392 for (i = 0; i < 4; ++i) {
1393 struct net2272_ep *ep = &dev->ep[i];
1395 ep->ep.name = ep_name[i];
/* only the configured dma_ep endpoint gets DMA service */
1400 if (use_dma && ep->num == dma_ep)
1403 if (i > 0 && i <= 3)
1404 ep->fifo_size = 512;
1407 net2272_ep_reset(ep);
1410 ep->ep.caps.type_control = true;
1412 ep->ep.caps.type_iso = true;
1413 ep->ep.caps.type_bulk = true;
1414 ep->ep.caps.type_int = true;
1417 ep->ep.caps.dir_in = true;
1418 ep->ep.caps.dir_out = true;
/* ep0 max packet is fixed at 64 bytes */
1420 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1422 dev->gadget.ep0 = &dev->ep[0].ep;
1423 dev->ep[0].stopped = 0;
1424 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
/* Arm endpoint zero and global interrupt sources so the device can
 * respond to enumeration: response policy for ep0, soft-connect/wakeup
 * control, and the setup/ep0/DMA plus VBUS/reset/suspend irq enables.
 */
1428 net2272_ep0_start(struct net2272 *dev)
1430 struct net2272_ep *ep0 = &dev->ep[0];
1432 net2272_ep_write(ep0, EP_RSPSET,
1433 (1 << NAK_OUT_PACKETS_MODE) |
1434 (1 << ALT_NAK_OUT_PACKETS));
1435 net2272_ep_write(ep0, EP_RSPCLR,
1436 (1 << HIDE_STATUS_PHASE) |
1437 (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
/* USB_DETECT_ENABLE follows the soft-connect state */
1438 net2272_write(dev, USBCTL0,
1439 (dev->softconnect << USB_DETECT_ENABLE) |
1440 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1441 (1 << IO_WAKEUP_ENABLE));
1442 net2272_write(dev, IRQENB0,
1443 (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1444 (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1445 (1 << DMA_DONE_INTERRUPT_ENABLE));
1446 net2272_write(dev, IRQENB1,
1447 (1 << VBUS_INTERRUPT_ENABLE) |
1448 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1449 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1452 /* when a driver is successfully registered, it will receive
1453 * control requests including set_configuration(), which enables
1454 * non-control requests. then usb traffic follows until a
1455 * disconnect is reported. then a host may connect again, or
1456 * the driver might get unbound.
1458 static int net2272_start(struct usb_gadget *_gadget,
1459 struct usb_gadget_driver *driver)
1461 struct net2272 *dev;
/* require a setup() callback and a high-speed-capable driver */
1464 if (!driver || !driver->setup ||
1465 driver->max_speed != USB_SPEED_HIGH)
1468 dev = container_of(_gadget, struct net2272, gadget);
/* reset per-endpoint irq statistics */
1470 for (i = 0; i < 4; ++i)
1471 dev->ep[i].irqs = 0;
1472 /* hook up the driver ... */
1473 dev->softconnect = 1;
/* NOTE(review): modern kernels dropped this driver.bus assignment —
 * confirm against the target kernel version before removing */
1474 driver->driver.bus = NULL;
1475 dev->driver = driver;
1477 /* ... then enable host detection and ep0; and we're ready
1478 * for set_configuration as well as eventual disconnect.
1480 net2272_ep0_start(dev);
/* Quiesce the hardware and flush all queued requests; report the
 * disconnect to the gadget driver (lock dropped around the callback)
 * and reinitialize endpoint software state.
 */
1486 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1490 /* don't disconnect if it's not connected */
1491 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1494 /* stop hardware; prevent new request submissions;
1495 * and kill any outstanding requests.
1497 net2272_usb_reset(dev);
1498 for (i = 0; i < 4; ++i)
1499 net2272_dequeue_all(&dev->ep[i]);
1501 /* report disconnect; the driver is already quiesced */
/* drop the lock: gadget-driver callbacks may sleep/reenter */
1503 spin_unlock(&dev->lock);
1504 driver->disconnect(&dev->gadget);
1505 spin_lock(&dev->lock);
1508 net2272_usb_reinit(dev);
/* usb_gadget_ops.udc_stop: tear down activity under the device lock. */
1511 static int net2272_stop(struct usb_gadget *_gadget)
1513 struct net2272 *dev;
1514 unsigned long flags;
1516 dev = container_of(_gadget, struct net2272, gadget);
1518 spin_lock_irqsave(&dev->lock, flags);
/* NULL driver: unbinding, no disconnect callback expected */
1519 stop_activity(dev, NULL);
1520 spin_unlock_irqrestore(&dev->lock, flags);
1527 /*---------------------------------------------------------------------------*/
1528 /* handle ep-a/ep-b dma completions */
/* Completes the request at the head of the endpoint queue after a DMA
 * transfer: de-asserts DREQ, re-enables PIO interrupts, then finishes
 * the IN or OUT side bookkeeping (zlp/short-packet validation for IN,
 * EP_TRANSFER byte count for OUT).
 */
1530 net2272_handle_dma(struct net2272_ep *ep)
1532 struct net2272_request *req;
1536 if (!list_empty(&ep->queue))
1537 req = list_entry(ep->queue.next,
1538 struct net2272_request, queue);
1542 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1544 /* Ensure DREQ is de-asserted */
1545 net2272_write(ep->dev, DMAREQ,
1546 (0 << DMA_BUFFER_VALID)
1547 | (0 << DMA_REQUEST_ENABLE)
1548 | (1 << DMA_CONTROL_DACK)
1549 | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1550 | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1551 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1552 | (ep->dma << DMA_ENDPOINT_SELECT));
1554 ep->dev->dma_busy = 0;
/* restore PIO data interrupts that DMA mode had masked */
1556 net2272_ep_write(ep, EP_IRQENB,
1557 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1558 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1559 | net2272_ep_read(ep, EP_IRQENB));
1561 /* device-to-host transfer completed */
1563 /* validate a short packet or zlp if necessary */
1564 if ((req->req.length % ep->ep.maxpacket != 0) ||
1566 set_fifo_bytecount(ep, 0);
1568 net2272_done(ep, req, 0);
/* more queued work: kick DMA again or fall back to PIO */
1569 if (!list_empty(&ep->queue)) {
1570 req = list_entry(ep->queue.next,
1571 struct net2272_request, queue);
1572 status = net2272_kick_dma(ep, req);
1574 net2272_pio_advance(ep);
1577 /* host-to-device transfer completed */
1579 /* terminated with a short packet? */
1580 if (net2272_read(ep->dev, IRQSTAT0) &
1581 (1 << DMA_DONE_INTERRUPT)) {
1582 /* abort system dma */
1583 net2272_cancel_dma(ep->dev);
1586 /* EP_TRANSFER will contain the number of bytes
1587 * actually received.
1588 * NOTE: There is no overflow detection on EP_TRANSFER:
1589 * We can't deal with transfers larger than 2^24 bytes!
1591 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1592 | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1593 | (net2272_ep_read(ep, EP_TRANSFER0));
1598 req->req.actual += len;
1600 /* get any remaining data */
1601 net2272_pio_advance(ep);
1605 /*---------------------------------------------------------------------------*/
/* Per-endpoint (non-DMA) interrupt service: acknowledge the endpoint
 * status bits, then advance PIO for a received (OUT) or transmitted
 * (IN) data packet.
 */
1608 net2272_handle_ep(struct net2272_ep *ep)
1610 struct net2272_request *req;
1613 if (!list_empty(&ep->queue))
1614 req = list_entry(ep->queue.next,
1615 struct net2272_request, queue);
1619 /* ack all, and handle what we care about */
1620 stat0 = net2272_ep_read(ep, EP_STAT0);
1621 stat1 = net2272_ep_read(ep, EP_STAT1);
1624 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1625 ep->ep.name, stat0, stat1, req ? &req->req : NULL);
/* NAK_OUT_PACKETS / SHORT_PACKET are deliberately left pending */
1627 net2272_ep_write(ep, EP_STAT0, stat0 &
1628 ~((1 << NAK_OUT_PACKETS)
1629 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1630 net2272_ep_write(ep, EP_STAT1, stat1);
1632 /* data packet(s) received (in the fifo, OUT)
1633 * direction must be validated, otherwise control read status phase
1634 * could be interpreted as a valid packet
1636 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1637 net2272_pio_advance(ep);
1638 /* data packet(s) transmitted (IN) */
1639 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1640 net2272_pio_advance(ep);
/* Map a wIndex endpoint address (direction bit + number) from a control
 * request to the matching struct net2272_ep on the gadget ep_list;
 * endpoint number 0 is handled by the caller, not looked up here.
 */
1643 static struct net2272_ep *
1644 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1646 struct net2272_ep *ep;
1648 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1651 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1652 u8 bEndpointAddress;
1656 bEndpointAddress = ep->desc->bEndpointAddress;
/* direction must match */
1657 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
/* endpoint number must match */
1659 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
/* Data pattern loaded into the ep0 FIFO for the USB TEST_PACKET test
 * mode (see net2272_set_test_mode); the J/K notation below describes
 * the resulting line states.
 */
1670 * JJJJJJJKKKKKKK * 8
1672 * {JKKKKKKK * 10}, JK
1674 static const u8 net2272_test_packet[] = {
1675 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1676 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1677 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1678 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1679 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1680 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
/* Enter a USB 2.0 electrical test mode: mask all interrupts, force
 * high speed, complete the control status phase, enable the requested
 * USBTEST mode, and for TEST_PACKET load the fixed pattern via the
 * 8-bit FIFO path.
 */
1684 net2272_set_test_mode(struct net2272 *dev, int mode)
1688 /* Disable all net2272 interrupts:
1689 * Nothing but a power cycle should stop the test.
1691 net2272_write(dev, IRQENB0, 0x00);
1692 net2272_write(dev, IRQENB1, 0x00);
1694 /* Force transceiver to high-speed */
1695 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1697 net2272_write(dev, PAGESEL, 0);
1698 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1699 net2272_write(dev, EP_RSPCLR,
1700 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1701 | (1 << HIDE_STATUS_PHASE));
1702 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1703 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1705 /* wait for status phase to complete */
/* busy-wait: interrupts are already disabled for the test */
1706 while (!(net2272_read(dev, EP_STAT0) &
1707 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1710 /* Enable test mode */
1711 net2272_write(dev, USBTEST, mode);
1713 /* load test packet */
1714 if (mode == TEST_PACKET) {
1715 /* switch to 8 bit mode */
1716 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1717 ~(1 << DATA_WIDTH));
1719 for (i = 0; i < sizeof(net2272_test_packet); ++i)
1720 net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1722 /* Validate test packet */
1723 net2272_write(dev, EP_TRANSFER0, 0);
/* Service IRQSTAT0 sources: SETUP packets (full ep0 standard-request
 * decode with delegation to the gadget driver's setup()), DMA-done,
 * and the per-endpoint data interrupts in the low four status bits.
 * NOTE(review): several framing lines (braces, gotos, case exits) are
 * missing from this extract; code lines are preserved byte-identical.
 */
1728 net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1730 struct net2272_ep *ep;
1733 /* starting a control request? */
1734 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1737 struct usb_ctrlrequest r;
1740 struct net2272_request *req;
/* first traffic after reset: latch the negotiated speed */
1742 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1743 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1744 dev->gadget.speed = USB_SPEED_HIGH;
1746 dev->gadget.speed = USB_SPEED_FULL;
1747 dev_dbg(dev->dev, "%s\n",
1748 usb_speed_string(dev->gadget.speed));
1754 /* make sure any leftover interrupt state is cleared */
1755 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
/* flush any requests still queued on ep0 from a prior transfer */
1756 while (!list_empty(&ep->queue)) {
1757 req = list_entry(ep->queue.next,
1758 struct net2272_request, queue);
1759 net2272_done(ep, req,
1760 (req->req.actual == req->req.length) ? 0 : -EPROTO);
1763 dev->protocol_stall = 0;
1764 net2272_ep_write(ep, EP_STAT0,
1765 (1 << DATA_IN_TOKEN_INTERRUPT)
1766 | (1 << DATA_OUT_TOKEN_INTERRUPT)
1767 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1768 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1769 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1770 net2272_ep_write(ep, EP_STAT1,
1772 | (1 << USB_OUT_ACK_SENT)
1773 | (1 << USB_OUT_NAK_SENT)
1774 | (1 << USB_IN_ACK_RCVD)
1775 | (1 << USB_IN_NAK_SENT)
1776 | (1 << USB_STALL_SENT)
1777 | (1 << LOCAL_OUT_ZLP));
1780 * Ensure Control Read pre-validation setting is beyond maximum size
1781 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1782 * an EP0 transfer following the Control Write is a Control Read,
1783 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1784 * pre-validation count.
1785 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1786 * the pre-validation count cannot cause an unexpected validation
1788 net2272_write(dev, PAGESEL, 0);
1789 net2272_write(dev, EP_TRANSFER2, 0xff);
1790 net2272_write(dev, EP_TRANSFER1, 0xff);
1791 net2272_write(dev, EP_TRANSFER0, 0xff);
/* read the eight raw setup bytes into the request union */
1793 u.raw[0] = net2272_read(dev, SETUP0);
1794 u.raw[1] = net2272_read(dev, SETUP1);
1795 u.raw[2] = net2272_read(dev, SETUP2);
1796 u.raw[3] = net2272_read(dev, SETUP3);
1797 u.raw[4] = net2272_read(dev, SETUP4);
1798 u.raw[5] = net2272_read(dev, SETUP5);
1799 u.raw[6] = net2272_read(dev, SETUP6);
1800 u.raw[7] = net2272_read(dev, SETUP7);
1802 * If you have a big endian cpu make sure le16_to_cpus
1803 * performs the proper byte swapping here...
1805 le16_to_cpus(&u.r.wValue);
1806 le16_to_cpus(&u.r.wIndex);
1807 le16_to_cpus(&u.r.wLength);
/* ack the setup interrupt and drop it from the working status */
1810 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1811 stat ^= (1 << SETUP_PACKET_INTERRUPT);
1813 /* watch control traffic at the token level, and force
1814 * synchronization before letting the status phase happen.
1816 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1818 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1819 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1820 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1821 stop_out_naking(ep);
1823 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1824 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1825 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1826 net2272_ep_write(ep, EP_IRQENB, scratch);
/* non-standard requests go straight to the gadget driver */
1828 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1830 switch (u.r.bRequest) {
1831 case USB_REQ_GET_STATUS: {
1832 struct net2272_ep *e;
1835 switch (u.r.bRequestType & USB_RECIP_MASK) {
1836 case USB_RECIP_ENDPOINT:
1837 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1838 if (!e || u.r.wLength > 2)
1840 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1841 status = cpu_to_le16(1);
1843 status = cpu_to_le16(0);
1845 /* don't bother with a request object! */
1846 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1847 writew(status, net2272_reg_addr(dev, EP_DATA));
1848 set_fifo_bytecount(&dev->ep[0], 0);
1850 dev_vdbg(dev->dev, "%s stat %02x\n",
1851 ep->ep.name, status);
1852 goto next_endpoints;
1853 case USB_RECIP_DEVICE:
1854 if (u.r.wLength > 2)
1856 if (dev->gadget.is_selfpowered)
1857 status = (1 << USB_DEVICE_SELF_POWERED);
1859 /* don't bother with a request object! */
1860 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1861 writew(status, net2272_reg_addr(dev, EP_DATA));
1862 set_fifo_bytecount(&dev->ep[0], 0);
1864 dev_vdbg(dev->dev, "device stat %02x\n", status);
1865 goto next_endpoints;
1866 case USB_RECIP_INTERFACE:
1867 if (u.r.wLength > 2)
1870 /* don't bother with a request object! */
1871 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1872 writew(status, net2272_reg_addr(dev, EP_DATA));
1873 set_fifo_bytecount(&dev->ep[0], 0);
1875 dev_vdbg(dev->dev, "interface status %02x\n", status);
1876 goto next_endpoints;
1881 case USB_REQ_CLEAR_FEATURE: {
1882 struct net2272_ep *e;
1884 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1886 if (u.r.wValue != USB_ENDPOINT_HALT ||
1889 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
/* a wedged endpoint stays halted until the driver un-wedges it */
1893 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1896 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1900 goto next_endpoints;
1902 case USB_REQ_SET_FEATURE: {
1903 struct net2272_ep *e;
1905 if (u.r.bRequestType == USB_RECIP_DEVICE) {
/* high byte of wIndex selects the USB 2.0 test mode */
1906 if (u.r.wIndex != NORMAL_OPERATION)
1907 net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1909 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1910 goto next_endpoints;
1911 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1913 if (u.r.wValue != USB_ENDPOINT_HALT ||
1916 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1921 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1922 goto next_endpoints;
1924 case USB_REQ_SET_ADDRESS: {
1925 net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1931 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1933 u.r.bRequestType, u.r.bRequest,
1934 u.r.wValue, u.r.wIndex,
1935 net2272_ep_read(ep, EP_CFG));
/* delegate to the gadget driver with the lock dropped */
1936 spin_unlock(&dev->lock);
1937 tmp = dev->driver->setup(&dev->gadget, &u.r);
1938 spin_lock(&dev->lock);
1941 /* stall ep0 on error */
1944 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1945 u.r.bRequestType, u.r.bRequest, tmp);
1946 dev->protocol_stall = 1;
1948 /* endpoint dma irq? */
1949 } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1950 net2272_cancel_dma(dev);
1951 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1952 stat &= ~(1 << DMA_DONE_INTERRUPT);
1953 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1957 net2272_handle_dma(ep);
1961 /* endpoint data irq? */
1962 scratch = stat & 0x0f;
/* walk endpoint bits 0-3 until all pending ones are serviced */
1964 for (num = 0; scratch; num++) {
1967 /* does this endpoint's FIFO and queue need tending? */
1969 if ((scratch & t) == 0)
1974 net2272_handle_ep(ep);
1977 /* some interrupts we can just ignore */
1978 stat &= ~(1 << SOF_INTERRUPT);
1981 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
/* Service IRQSTAT1 sources: VBUS disconnect / root-port reset (which
 * tear down activity and restart ep0), suspend/resume notification to
 * the gadget driver, and acknowledgment of everything else.
 */
1985 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1989 /* after disconnect there's nothing else to do! */
1990 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1991 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1995 bool disconnect = false;
1998 * Ignore disconnects and resets if the speed hasn't been set.
1999 * VBUS can bounce and there's always an initial reset.
2001 net2272_write(dev, IRQSTAT1, tmp);
2002 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
/* VBUS interrupt with VBUS pin low => cable pulled */
2003 if ((stat & (1 << VBUS_INTERRUPT)) &&
2004 (net2272_read(dev, USBCTL1) &
2005 (1 << VBUS_PIN)) == 0) {
2007 dev_dbg(dev->dev, "disconnect %s\n",
2008 dev->driver->driver.name);
2009 } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2010 (net2272_read(dev, USBCTL1) & mask)
2013 dev_dbg(dev->dev, "reset %s\n",
2014 dev->driver->driver.name);
2017 if (disconnect || reset) {
2018 stop_activity(dev, dev->driver);
2019 net2272_ep0_start(dev);
/* lock dropped around the gadget-driver callbacks */
2020 spin_unlock(&dev->lock);
2022 usb_gadget_udc_reset
2023 (&dev->gadget, dev->driver);
2025 (dev->driver->disconnect)
2027 spin_lock(&dev->lock);
2037 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2039 net2272_write(dev, IRQSTAT1, tmp);
2040 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2041 if (dev->driver->suspend)
2042 dev->driver->suspend(&dev->gadget);
2043 if (!enable_suspend) {
2044 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2045 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2048 if (dev->driver->resume)
2049 dev->driver->resume(&dev->gadget);
2054 /* clear any other status/irqs */
2056 net2272_write(dev, IRQSTAT1, stat);
2058 /* some status we can just ignore */
2059 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2060 | (1 << SUSPEND_REQUEST_INTERRUPT)
2061 | (1 << RESUME_INTERRUPT));
2065 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
/* Top-level interrupt handler. On the PCI RDK boards the PLX 9054 /
 * FPGA interrupt status is checked and gated first; on plain platform
 * devices the two NET2272 status registers are serviced directly.
 */
2068 static irqreturn_t net2272_irq(int irq, void *_dev)
2070 struct net2272 *dev = _dev;
2071 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2074 #if defined(PLX_PCI_RDK)
2077 spin_lock(&dev->lock);
2078 #if defined(PLX_PCI_RDK)
2079 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
/* local interrupt from the NET2272 behind the 9054 bridge? */
2081 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
/* mask PCI interrupts while servicing, re-enable after */
2082 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2083 dev->rdk1.plx9054_base_addr + INTCSR);
2084 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2085 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2086 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2087 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2088 dev->rdk1.plx9054_base_addr + INTCSR);
2090 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2091 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2092 dev->rdk1.plx9054_base_addr + DMACSR0);
2094 dmareq = net2272_read(dev, DMAREQ);
2096 net2272_handle_dma(&dev->ep[2]);
2098 net2272_handle_dma(&dev->ep[1]);
2101 #if defined(PLX_PCI_RDK2)
2102 /* see if PCI int for us by checking irqstat */
2103 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2104 if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2105 spin_unlock(&dev->lock);
2108 /* check dma interrupts */
2110 /* Platform/device interrupt handler */
2111 #if !defined(PLX_PCI_RDK)
2112 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2113 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2115 spin_unlock(&dev->lock);
/* Sanity-check CPU <-> NET2272 register access: exercise the SCRATCH
 * read/write register, confirm CHIPREV is read-only, and validate the
 * legacy and silicon revision registers. Returns 0 on success.
 */
2120 static int net2272_present(struct net2272 *dev)
2123 * Quick test to see if CPU can communicate properly with the NET2272.
2124 * Verifies connection using writes and reads to write/read and
2125 * read-only registers.
2127 * This routine is strongly recommended especially during early bring-up
2128 * of new hardware, however for designs that do not apply Power On System
2129 * Tests (POST) it may be discarded (or perhaps minimized).
2134 /* Verify NET2272 write/read SCRATCH register can write and read */
2135 refval = net2272_read(dev, SCRATCH);
2136 for (ii = 0; ii < 0x100; ii += 7) {
2137 net2272_write(dev, SCRATCH, ii);
2138 val = net2272_read(dev, SCRATCH);
2141 "%s: write/read SCRATCH register test failed: "
2142 "wrote:0x%2.2x, read:0x%2.2x\n",
2147 /* To be nice, we write the original SCRATCH value back: */
2148 net2272_write(dev, SCRATCH, refval);
2150 /* Verify NET2272 CHIPREV register is read-only: */
2151 refval = net2272_read(dev, CHIPREV_2272);
2152 for (ii = 0; ii < 0x100; ii += 7) {
2153 net2272_write(dev, CHIPREV_2272, ii);
2154 val = net2272_read(dev, CHIPREV_2272);
2155 if (val != refval) {
2157 "%s: write/read CHIPREV register test failed: "
2158 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2159 __func__, ii, val, refval);
2165 * Verify NET2272's "NET2270 legacy revision" register
2166 * - NET2272 has two revision registers. The NET2270 legacy revision
2167 * register should read the same value, regardless of the NET2272
2168 * silicon revision. The legacy register applies to NET2270
2169 * firmware being applied to the NET2272.
2171 val = net2272_read(dev, CHIPREV_LEGACY);
2172 if (val != NET2270_LEGACY_REV) {
2174 * Unexpected legacy revision value
2175 * - Perhaps the chip is a NET2270?
2178 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2179 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2180 __func__, NET2270_LEGACY_REV, val);
2185 * Verify NET2272 silicon revision
2186 * - This revision register is appropriate for the silicon version
2189 val = net2272_read(dev, CHIPREV_2272);
2191 case CHIPREV_NET2272_R1:
2193 * NET2272 Rev 1 has DMA related errata:
2194 * - Newer silicon (Rev 1A or better) required
2197 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2200 case CHIPREV_NET2272_R1A:
2203 /* NET2272 silicon version *may* not work with this firmware */
2205 "%s: unexpected silicon revision register value: "
2206 " CHIPREV_2272: 0x%2.2x\n",
2209 * Return Success, even though the chip rev is not an expected value
2210 * - Older, pre-built firmware can attempt to operate on newer silicon
2211 * - Often, new silicon is perfectly compatible
2215 /* Success: NET2272 checks out OK */
/* Release callback for the gadget device: recover the net2272 instance
 * from drvdata (the extract omits the following free).
 */
2220 net2272_gadget_release(struct device *_dev)
2222 struct net2272 *dev = dev_get_drvdata(_dev);
2226 /*---------------------------------------------------------------------------*/
/* Common teardown for PCI and platform variants: unregister the UDC,
 * release the irq and register mapping, and remove the sysfs file.
 */
2229 net2272_remove(struct net2272 *dev)
2231 usb_del_gadget_udc(&dev->gadget);
2232 free_irq(dev->irq, dev);
2233 iounmap(dev->base_addr);
2234 device_remove_file(dev->dev, &dev_attr_registers);
2236 dev_info(dev->dev, "unbind\n");
/* Allocate and minimally initialize a struct net2272; requires a valid
 * irq number. Returns ERR_PTR on failure, never NULL.
 */
2239 static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2241 struct net2272 *ret;
2244 dev_dbg(dev, "No IRQ!\n");
2245 return ERR_PTR(-ENODEV);
2248 /* alloc, and start init */
2249 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2251 return ERR_PTR(-ENOMEM);
2253 spin_lock_init(&ret->lock);
2256 ret->gadget.ops = &net2272_ops;
2257 ret->gadget.max_speed = USB_SPEED_HIGH;
2259 /* the "gadget" abstracts/virtualizes the controller */
2260 ret->gadget.name = driver_name;
/* Finish probing once registers are mapped: verify the chip is present,
 * reset/reinit it, request the irq with the caller-supplied trigger
 * flags, create the sysfs file, and register the UDC. Error paths
 * (partially visible here) unwind in reverse order.
 */
2266 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2270 /* See if there... */
2271 if (net2272_present(dev)) {
2272 dev_warn(dev->dev, "2272 not found!\n");
2277 net2272_usb_reset(dev);
2278 net2272_usb_reinit(dev);
2280 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2282 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2286 dev->chiprev = net2272_read(dev, CHIPREV_2272);
2289 dev_info(dev->dev, "%s\n", driver_desc);
2290 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2291 dev->irq, dev->base_addr, dev->chiprev,
2293 dev_info(dev->dev, "version: %s\n", driver_vers);
2295 ret = device_create_file(dev->dev, &dev_attr_registers);
2299 ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
2300 net2272_gadget_release);
/* error unwind */
2307 device_remove_file(dev->dev, &dev_attr_registers);
2309 free_irq(dev->irq, dev);
2317 * wrap this driver around the specified device, but
2318 * don't respond over USB until a gadget driver binds to us
/* RDK1 board setup: map the PLX 9054, EPLD, and NET2272 BARs, set the
 * 9054 local bus to 16 bits, enable its interrupts, and pulse the
 * NET2272 reset line through the EPLD control register.
 */
2322 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2324 unsigned long resource, len, tmp;
2325 void __iomem *mem_mapped_addr[4];
2329 * BAR 0 holds PLX 9054 config registers
2330 * BAR 1 is i/o memory; unused here
2331 * BAR 2 holds EPLD config registers
2332 * BAR 3 holds NET2272 registers
2335 /* Find and map all address spaces */
2336 for (i = 0; i < 4; ++i) {
2338 continue; /* BAR1 unused */
2340 resource = pci_resource_start(pdev, i);
2341 len = pci_resource_len(pdev, i);
2343 if (!request_mem_region(resource, len, driver_name)) {
2344 dev_dbg(dev->dev, "controller already in use\n");
2349 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2350 if (mem_mapped_addr[i] == NULL) {
2351 release_mem_region(resource, len);
2352 dev_dbg(dev->dev, "can't map memory\n");
2358 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2359 dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2360 dev->base_addr = mem_mapped_addr[3];
2362 /* Set PLX 9054 bus width (16 bits) */
2363 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2364 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2365 dev->rdk1.plx9054_base_addr + LBRD1);
2367 /* Enable PLX 9054 Interrupts */
2368 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2369 (1 << PCI_INTERRUPT_ENABLE) |
2370 (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2371 dev->rdk1.plx9054_base_addr + INTCSR);
2373 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2374 dev->rdk1.plx9054_base_addr + DMACSR0);
2377 writeb((1 << EPLD_DMA_ENABLE) |
2378 (1 << DMA_CTL_DACK) |
2379 (1 << DMA_TIMEOUT_ENABLE) |
2383 (1 << NET2272_RESET),
2384 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
/* release the NET2272 from reset */
2387 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2388 ~(1 << NET2272_RESET),
2389 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
/* error unwind: unmap/release the BARs mapped so far */
2396 iounmap(mem_mapped_addr[i]);
2397 release_mem_region(pci_resource_start(pdev, i),
2398 pci_resource_len(pdev, i));
/* RDK2 board setup: map the FPGA and NET2272 BARs, reset the chip and
 * select 16-bit bus width through the FPGA, then enable the FPGA's
 * NET2272 interrupt forwarding.
 */
2405 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2407 unsigned long resource, len;
2408 void __iomem *mem_mapped_addr[2];
2412 * BAR 0 holds FPGA config registers
2413 * BAR 1 holds NET2272 registers
2416 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2417 for (i = 0; i < 2; ++i) {
2418 resource = pci_resource_start(pdev, i);
2419 len = pci_resource_len(pdev, i);
2421 if (!request_mem_region(resource, len, driver_name)) {
2422 dev_dbg(dev->dev, "controller already in use\n");
2427 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2428 if (mem_mapped_addr[i] == NULL) {
2429 release_mem_region(resource, len);
2430 dev_dbg(dev->dev, "can't map memory\n");
2436 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2437 dev->base_addr = mem_mapped_addr[1];
2440 /* Set 2272 bus width (16 bits) and reset */
2441 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2443 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2444 /* Print fpga version number */
2445 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2446 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2447 /* Enable FPGA Interrupts */
2448 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
/* error unwind: unmap/release the BARs mapped so far */
2454 iounmap(mem_mapped_addr[i]);
2455 release_mem_region(pci_resource_start(pdev, i),
2456 pci_resource_len(pdev, i));
/* PCI probe: allocate the device, enable the PCI function, dispatch to
 * the board-specific (RDK1/RDK2) setup, then finish common probing.
 */
2463 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2465 struct net2272 *dev;
2468 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2470 return PTR_ERR(dev);
2471 dev->dev_id = pdev->device;
2473 if (pci_enable_device(pdev) < 0) {
2478 pci_set_master(pdev);
2480 switch (pdev->device) {
2481 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2482 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
/* irqflags 0: PCI interrupts need no special trigger type */
2488 ret = net2272_probe_fin(dev, 0);
2492 pci_set_drvdata(pdev, dev);
/* error unwind */
2497 pci_disable_device(pdev);
/* RDK1 teardown: mask 9054 interrupts, then unmap and release the
 * three BARs claimed by net2272_rdk1_probe (BAR1 was never claimed).
 */
2505 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2509 /* disable PLX 9054 interrupts */
2510 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2511 ~(1 << PCI_INTERRUPT_ENABLE),
2512 dev->rdk1.plx9054_base_addr + INTCSR);
2514 /* clean up resources allocated during probe() */
2515 iounmap(dev->rdk1.plx9054_base_addr);
2516 iounmap(dev->rdk1.epld_base_addr);
2518 for (i = 0; i < 4; ++i) {
2520 continue; /* BAR1 unused */
2521 release_mem_region(pci_resource_start(pdev, i),
2522 pci_resource_len(pdev, i));
/* RDK2 teardown: unmap the FPGA mapping and release both claimed BARs.
 * NOTE(review): the text below that references rdk1.plx9054_base_addr
 * appears to be commented-out code (it addresses RDK1 registers from
 * the RDK2 path) — verify the comment delimiters against the full
 * driver before touching it.
 */
2527 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2531 /* disable fpga interrupts
2532 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2533 ~(1 << PCI_INTERRUPT_ENABLE),
2534 dev->rdk1.plx9054_base_addr + INTCSR);
2537 /* clean up resources allocated during probe() */
2538 iounmap(dev->rdk2.fpga_base_addr);
2540 for (i = 0; i < 2; ++i)
2541 release_mem_region(pci_resource_start(pdev, i),
2542 pci_resource_len(pdev, i));
/* PCI remove: common teardown, then board-specific cleanup and PCI
 * function disable.
 */
2546 net2272_pci_remove(struct pci_dev *pdev)
2548 struct net2272 *dev = pci_get_drvdata(pdev);
2550 net2272_remove(dev);
2552 switch (pdev->device) {
2553 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2554 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2558 pci_disable_device(pdev);
2563 /* Table of matching PCI IDs */
/* Matches the two PLX reference development kits (RDK1 and RDK2). */
2564 static struct pci_device_id pci_ids[] = {
2566 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2568 .vendor = PCI_VENDOR_ID_PLX,
2569 .device = PCI_DEVICE_ID_RDK1,
2570 .subvendor = PCI_ANY_ID,
2571 .subdevice = PCI_ANY_ID,
2574 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2576 .vendor = PCI_VENDOR_ID_PLX,
2577 .device = PCI_DEVICE_ID_RDK2,
2578 .subvendor = PCI_ANY_ID,
2579 .subdevice = PCI_ANY_ID,
/* PCI driver glue for the RDK boards. */
2585 static struct pci_driver net2272_pci_driver = {
2586 .name = driver_name,
2587 .id_table = pci_ids,
2589 .probe = net2272_pci_probe,
2590 .remove = net2272_pci_remove,
/* PCI (un)registration wrappers; the stubs below are the no-op
 * versions compiled when PCI support is not configured.
 */
2593 static int net2272_pci_register(void)
2595 return pci_register_driver(&net2272_pci_driver);
2598 static void net2272_pci_unregister(void)
2600 pci_unregister_driver(&net2272_pci_driver);
2604 static inline int net2272_pci_register(void) { return 0; }
2605 static inline void net2272_pci_unregister(void) { }
2608 /*---------------------------------------------------------------------------*/
/* Platform-bus probe: pick up the irq, register-window, and optional
 * bus-shift resources, map the registers, and finish common probing.
 *
 * Fix (review): irqflags is accumulated from the IORESOURCE_IRQ_*
 * trigger flags of the platform resource but was then thrown away —
 * net2272_probe_fin() was called with a hard-coded IRQF_TRIGGER_LOW,
 * so boards declaring edge/high-level triggers got the wrong request_irq
 * flags. irqflags is now zero-initialized (it was never set to 0 in
 * this text before the |= accumulation) and passed through.
 */
2611 net2272_plat_probe(struct platform_device *pdev)
2613 struct net2272 *dev;
2615 unsigned int irqflags = 0;
2616 resource_size_t base, len;
2617 struct resource *iomem, *iomem_bus, *irq_res;
2619 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2620 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2621 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2622 if (!irq_res || !iomem) {
2623 dev_err(&pdev->dev, "must provide irq/base addr");
2627 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2629 return PTR_ERR(dev);
/* translate resource trigger flags into request_irq() flags */
2632 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2633 irqflags |= IRQF_TRIGGER_RISING;
2634 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2635 irqflags |= IRQF_TRIGGER_FALLING;
2636 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2637 irqflags |= IRQF_TRIGGER_HIGH;
2638 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2639 irqflags |= IRQF_TRIGGER_LOW;
2641 base = iomem->start;
2642 len = resource_size(iomem);
2644 dev->base_shift = iomem_bus->start;
2646 if (!request_mem_region(base, len, driver_name)) {
2647 dev_dbg(dev->dev, "get request memory region!\n");
2651 dev->base_addr = ioremap_nocache(base, len);
2652 if (!dev->base_addr) {
2653 dev_dbg(dev->dev, "can't map memory\n");
/* use the trigger flags computed from the platform resource */
2658 ret = net2272_probe_fin(dev, irqflags);
2662 platform_set_drvdata(pdev, dev);
2663 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2664 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
/* error unwind */
2669 iounmap(dev->base_addr);
2671 release_mem_region(base, len);
/* Platform-bus remove: common teardown, then release the register
 * memory region claimed in probe.
 */
2679 net2272_plat_remove(struct platform_device *pdev)
2681 struct net2272 *dev = platform_get_drvdata(pdev);
2683 net2272_remove(dev);
2685 release_mem_region(pdev->resource[0].start,
2686 resource_size(&pdev->resource[0]));
/* Platform driver glue; no power-management callbacks yet. */
2693 static struct platform_driver net2272_plat_driver = {
2694 .probe = net2272_plat_probe,
2695 .remove = net2272_plat_remove,
2697 .name = driver_name,
2699 /* FIXME .suspend, .resume */
2701 MODULE_ALIAS("platform:net2272");
/* Module init: register both buses; PCI registration is unwound if the
 * platform-driver registration fails.
 */
2703 static int __init net2272_init(void)
2707 ret = net2272_pci_register();
2710 ret = platform_driver_register(&net2272_plat_driver);
2716 net2272_pci_unregister();
2719 module_init(net2272_init);
/* Module exit: unregister both bus drivers. */
2721 static void __exit net2272_cleanup(void)
2723 net2272_pci_unregister();
2724 platform_driver_unregister(&net2272_plat_driver);
2726 module_exit(net2272_cleanup);
/* Module metadata. */
2728 MODULE_DESCRIPTION(DRIVER_DESC);
2729 MODULE_AUTHOR("PLX Technology, Inc.");
2730 MODULE_LICENSE("GPL");