// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3
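/*
 * Note that ep->irq_pci_addr caches either one of the two sentinels above
 * or an MSI address with its low 8 bits masked off.  Both sentinels have
 * low bits set, so they can never match a real cached mapping: the first
 * raised IRQ always reprograms outbound region 0.
 */
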
/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of outbound regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (legacy
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 */
struct cdns_pcie_ep {
	struct cdns_pcie	pcie;
	u32			max_regions;
	unsigned long		ob_region_map;
	phys_addr_t		*ob_addr;
	phys_addr_t		irq_phys_addr;
	void __iomem		*irq_cpu_addr;
	u64			irq_pci_addr;
	u8			irq_pci_fn;
	u8			irq_pending;
};

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
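	/*
	 * Worked example with a hypothetical size: for epf_bar->size = 5 KiB,
	 * sz = 1ULL << fls64(5120 - 1) = 8192, i.e. the size is rounded up to
	 * the next power of two, and aperture = ilog2(8192) - 7 = 6.
	 */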
	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
	cdns_pcie_writel(pcie, reg, cfg);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
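	/*
	 * PCI_MSI_FLAGS_QMASK covers the Multiple Message Capable field in
	 * bits [3:1] of the Message Control register, hence the shift by 1:
	 * e.g. a (hypothetical) mmc of 2 advertises 1 << 2 = 4 vectors.
	 */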
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
				     u8 intx, bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
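	/*
	 * The message routing, message code and "no data" flag are encoded in
	 * the AXI address bits of the dedicated normal-message region, so the
	 * value written below is irrelevant: the write itself emits the
	 * Assert_INTx/Deassert_INTx message TLP.
	 */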
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
	 * from drivers/pci/dwc/pci-dra7xx.c
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);

	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
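	/*
	 * Worked example with hypothetical values: with 8 vectors enabled
	 * (mme = 3), data_mask = 0x07; for a host-programmed Message Data of
	 * 0x4920 and interrupt_num = 3, the value sent is
	 * (0x4920 & ~0x07) | (2 & 0x07) = 0x4922.
	 */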
	/* Get the PCI address to which the data must be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);
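	/*
	 * The doorbell write below goes through outbound region 0, retargeted
	 * at the 256-byte-aligned window that contains the MSI address
	 * (pci_addr_mask = 0xff): only the low 8 address bits vary from one
	 * write to the next, so the mapping can usually be reused.
	 */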
	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type,
				  u8 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf *epf;
	u32 cfg;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
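	/*
	 * E.g. with (hypothetical) endpoint functions 0 and 2 bound, cfg ends
	 * up as BIT(0) | BIT(2) = 0x5, enabling exactly those functions.
	 */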

	/*
	 * The PCIe links are automatically established by the controller
	 * once and for all at powerup: the software can neither start nor
	 * stop those links later at runtime.
	 *
	 * Then we only have to notify the EP core that our links are already
	 * established. However we don't call pci_epc_linkup() directly
	 * because we've already locked the epc->lock.
	 */
	list_for_each_entry(epf, &epc->pci_epf, list)
		pci_epf_linkup(epf);

	return 0;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.start		= cdns_pcie_ep_start,
};

static const struct of_device_id cdns_pcie_ep_of_match[] = {
	{ .compatible = "cdns,cdns-pcie-ep" },

	{ },
};

static int cdns_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cdns_pcie_ep *ep;
	struct cdns_pcie *pcie;
	struct pci_epc *epc;
	struct resource *res;
	int ret;
	int phy_count;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->is_rc = false;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
	pcie->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = of_property_read_u32(np, "cdns,max-outbound-regions",
				   &ep->max_regions);
	if (ret < 0) {
		dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
		return ret;
	}
	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	ret = cdns_pcie_init_phy(dev, pcie);
	if (ret) {
		dev_err(dev, "failed to init phy\n");
		return ret;
	}
	platform_set_drvdata(pdev, pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		goto err_get_sync;
	}

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		ret = PTR_ERR(epc);
		goto err_init;
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res));
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		goto err_init;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);
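	/*
	 * Since bit 0 stays set from now on, cdns_pcie_ep_map_addr() can
	 * never hand out region 0: it is permanently owned by the MSI/legacy
	 * IRQ path.
	 */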

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);
	cdns_pcie_disable_phy(pcie);
	phy_count = pcie->phy_count;
	while (phy_count--)
		device_link_del(pcie->link[phy_count]);

	return ret;
}

static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);

	cdns_pcie_disable_phy(pcie);
}

static struct platform_driver cdns_pcie_ep_driver = {
	.driver = {
		.name = "cdns-pcie-ep",
		.of_match_table = cdns_pcie_ep_of_match,
		.pm	= &cdns_pcie_pm_ops,
	},
	.probe = cdns_pcie_ep_probe,
	.shutdown = cdns_pcie_ep_shutdown,
};
builtin_platform_driver(cdns_pcie_ep_driver);