/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                               u8 opcode,
                               __le16 echo,
                               union event_ring_data *data, u8 fw_return_code);
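/* Derive the legacy-compatibility flags for a VF from its ACQUIRE request:
 * pre-packet-length/tunnelling HSI VFs keep the Rx producer in the legacy
 * location, and VFs that did not negotiate the QUEUE_QIDS capability use
 * the legacy CID scheme.
 */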
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
        u8 legacy = 0;

        if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
            ETH_HSI_VER_NO_PKT_LEN_TUNN)
                legacy |= QED_QCID_LEGACY_VF_RX_PROD;

        if (!(p_vf->acquire.vfdev_info.capabilities &
              VFPF_ACQUIRE_CAP_QUEUE_QIDS))
                legacy |= QED_QCID_LEGACY_VF_CID;

        return legacy;
}
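/* Post a COMMON_RAMROD_VF_START through the slowpath queue, informing
 * firmware of the VF's id, personality and the fastpath HSI version the
 * VF will be using.
 */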
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
        struct vf_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;
        u8 fp_minor;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_vf->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_start;

        p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
        p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_ETH_ROCE:
        case QED_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
                          p_hwfn->hw_info.personality);
                return -EINVAL;
        }

        fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
        if (fp_minor > ETH_HSI_VER_MINOR &&
            fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
                           p_vf->abs_vf_id,
                           ETH_HSI_VER_MAJOR,
                           fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
                fp_minor = ETH_HSI_VER_MINOR;
        }

        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "VF[%d] - Starting using HSI %02x.%02x\n",
                   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
                          u32 concrete_vfid, u16 opaque_vfid)
{
        struct vf_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_vfid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_STOP,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_stop;

        p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                                  int rel_vf_id,
                                  bool b_enabled_only, bool b_non_malicious)
{
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return false;
        }

        if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
            (rel_vf_id < 0))
                return false;

        if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
            b_enabled_only)
                return false;

        if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
            b_non_malicious)
                return false;

        return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return NULL;
        }

        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
                                  b_enabled_only, false))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
                       relative_vf_id);

        return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
        int i;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
                        return p_queue->cids[i].p_cid;
        }

        return NULL;
}

enum qed_iov_validate_q_mode {
        QED_IOV_VALIDATE_Q_NA,
        QED_IOV_VALIDATE_Q_ENABLE,
        QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf,
                                        u16 qid,
                                        enum qed_iov_validate_q_mode mode,
                                        bool b_is_tx)
{
        int i;

        if (mode == QED_IOV_VALIDATE_Q_NA)
                return true;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                struct qed_vf_queue_cid *p_qcid;

                p_qcid = &p_vf->vf_queues[qid].cids[i];

                if (!p_qcid->p_cid)
                        continue;

                if (p_qcid->b_is_tx != b_is_tx)
                        continue;

                return mode == QED_IOV_VALIDATE_Q_ENABLE;
        }

        /* In case we haven't found any valid cid, then it's disabled */
        return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf,
                                 u16 rx_qid,
                                 enum qed_iov_validate_q_mode mode)
{
        if (rx_qid >= p_vf->num_rxqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
                return false;
        }

        return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf,
                                 u16 tx_qid,
                                 enum qed_iov_validate_q_mode mode)
{
        if (tx_qid >= p_vf->num_txqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
                return false;
        }

        return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *p_vf, u16 sb_idx)
{
        int i;

        for (i = 0; i < p_vf->num_sbs; i++)
                if (p_vf->igu_sbs[i] == sb_idx)
                        return true;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
                   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

        return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_rxqs; i++)
                if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
                                                QED_IOV_VALIDATE_Q_ENABLE,
                                                false))
                        return true;

        return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_txqs; i++)
                if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
                                                QED_IOV_VALIDATE_Q_ENABLE,
                                                true))
                        return true;

        return false;
}
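/* Push the PF-side bulletin board copy into the VF's own memory over DMAE.
 * The version is bumped and a CRC is computed over everything past the crc
 * field; presumably the VF side validates that CRC before consuming a
 * newly published version.
 */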
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
                                    int vfid, struct qed_ptt *p_ptt)
{
        struct qed_bulletin_content *p_bulletin;
        int crc_size = sizeof(p_bulletin->crc);
        struct qed_dmae_params params;
        struct qed_vf_info *p_vf;

        p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!p_vf)
                return -EINVAL;

        if (!p_vf->vf_bulletin)
                return -EINVAL;

        p_bulletin = p_vf->bulletin.p_virt;

        /* Increment bulletin board version and compute crc */
        p_bulletin->version++;
        p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
                                p_vf->bulletin.size - crc_size);

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
                   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

        /* propagate bulletin board via dmae to vm memory */
        memset(&params, 0, sizeof(params));
        params.flags = QED_DMAE_FLAG_VF_DST;
        params.dst_vfid = p_vf->abs_vf_id;
        return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
                                  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
                                  &params);
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
        struct qed_hw_sriov_info *iov = cdev->p_iov_info;
        int pos = iov->pos;

        DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
        if (iov->num_vfs)
                DP_VERBOSE(cdev,
                           QED_MSG_IOV,
                           "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

        pci_read_config_dword(cdev->pdev,
                              pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

        pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

        pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        DP_VERBOSE(cdev,
                   QED_MSG_IOV,
                   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
                   iov->nres,
                   iov->cap,
                   iov->ctrl,
                   iov->total_vfs,
                   iov->initial_vfs,
                   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        /* Some sanity checks */
        if (iov->num_vfs > NUM_OF_VFS(cdev) ||
            iov->total_vfs > NUM_OF_VFS(cdev)) {
                /* This can happen only due to a bug. In this case we set
                 * num_vfs to zero to avoid memory corruption in the code that
                 * assumes max number of vfs.
                 */
                DP_NOTICE(cdev,
                          "IOV: Unexpected number of vfs set: %d; setting num_vfs to zero\n",
                          iov->num_vfs);

                iov->num_vfs = 0;
        }

        return 0;
}
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct qed_bulletin_content *p_bulletin_virt;
        dma_addr_t req_p, rply_p, bulletin_p;
        union pfvf_tlvs *p_reply_virt_addr;
        union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;

        memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
        req_p = p_iov_info->mbx_msg_phys_addr;
        p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
        rply_p = p_iov_info->mbx_reply_phys_addr;
        p_bulletin_virt = p_iov_info->p_bulletins;
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
                       "qed_iov_setup_vfdb called without allocating mem first\n");
                return;
        }

        for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;

                vf->vf_mbx.req_virt = p_req_virt_addr + idx;
                vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
                vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
                vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

                vf->state = VF_STOPPED;

                vf->bulletin.phys = idx *
                                    sizeof(struct qed_bulletin_content) +
                                    bulletin_p;
                vf->bulletin.p_virt = p_bulletin_virt + idx;
                vf->bulletin.size = sizeof(struct qed_bulletin_content);

                vf->relative_vf_id = idx;
                vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                                 (vf->abs_vf_id << 8);
                vf->vport_id = idx + 1;

                vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
                vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
        }
}
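/* Allocate DMA-coherent backing for all per-VF request mailboxes, reply
 * mailboxes and bulletin boards; qed_iov_setup_vfdb() later slices these
 * buffers into per-VF entries.
 */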
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        void **p_v_addr;
        u16 num_vfs = 0;

        num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_msg_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_msg_size,
                                       &p_iov_info->mbx_msg_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        /* Allocate PF Mailbox Reply buffer (per-VF) */
        p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_reply_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_reply_size,
                                       &p_iov_info->mbx_reply_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        /* Allocate bulletin boards (per-VF) */
        p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
                                     num_vfs;
        p_v_addr = &p_iov_info->p_bulletins;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->bulletins_size,
                                       &p_iov_info->bulletins_phys,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
                   (u64) p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
                   (u64) p_iov_info->mbx_reply_phys_addr,
                   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

        return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

        if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_msg_size,
                                  p_iov_info->mbx_msg_virt_addr,
                                  p_iov_info->mbx_msg_phys_addr);

        if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_reply_size,
                                  p_iov_info->mbx_reply_virt_addr,
                                  p_iov_info->mbx_reply_phys_addr);

        if (p_iov_info->p_bulletins)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->bulletins_size,
                                  p_iov_info->p_bulletins,
                                  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_sriov;

        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
                return 0;
        }

        p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
        if (!p_sriov)
                return -ENOMEM;

        p_hwfn->pf_iov_info = p_sriov;

        qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
                                  qed_sriov_eqe_event);

        return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;

        qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
        qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                qed_iov_free_vfdb(p_hwfn);
                kfree(p_hwfn->pf_iov_info);
        }
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
        kfree(cdev->p_iov_info);
        cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        int pos;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* Learn the PCI configuration */
        pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
                                      PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
                return 0;
        }

        /* Allocate a new struct for IOV information */
        cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
        if (!cdev->p_iov_info)
                return -ENOMEM;

        cdev->p_iov_info->pos = pos;

        rc = qed_iov_pci_cfg_info(cdev);
        if (rc)
                return rc;

        /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * In case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
        if (!cdev->p_iov_info->total_vfs) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                kfree(cdev->p_iov_info);
                cdev->p_iov_info = NULL;
                return 0;
        }

        /* First VF index based on offset is tricky:
         * - If ARI is supported [likely], offset - (16 - pf_id) would
         *   provide the number for eng0. 2nd engine VFs would begin
         *   after the first engine's VFs.
         * - If !ARI, VFs would start on the next device,
         *   so offset - (256 - pf_id) would provide the number.
         * Use the fact that offsets of (256 - pf_id) and above occur only
         * in the !ARI case to differentiate between the two.
         */
        if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
                u32 first = p_hwfn->cdev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 16;

                cdev->p_iov_info->first_vf_in_pf = first;

                if (QED_PATH_ID(p_hwfn))
                        cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
        } else {
                u32 first = p_hwfn->cdev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 256;

                cdev->p_iov_info->first_vf_in_pf = first;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
                   cdev->p_iov_info->first_vf_in_pf);

        return 0;
}

bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
                              int vfid, bool b_fail_malicious)
{
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
            !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        /* Check VF validity */
        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
                return false;

        return true;
}

bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
        return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
                                      u16 rel_vf_id, u8 to_disable)
{
        struct qed_vf_info *vf;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
                if (!vf)
                        continue;

                vf->to_disable = to_disable;
        }
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
        u16 i;

        if (!IS_QED_SRIOV(cdev))
                return;

        for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
                qed_iov_set_vf_to_disable(cdev, i, to_disable);
}
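/* Each PGLUE_B WAS_ERROR register holds one sticky error bit per VF, 32
 * VFs per 32-bit register - hence the (abs_vfid >> 5) register index and
 * the (abs_vfid & 0x1f) bit position below.
 */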
static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt, u8 abs_vfid)
{
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
               1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
        int i;

        /* Set VF masks and configuration - pretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf->num_sbs; i++)
                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
                                                vf->igu_sbs[i],
                                                vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, bool enable)
{
        u32 igu_vf_conf;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

        if (enable)
                igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
        else
                igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

        qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
        u8 current_max = 0;
        int i;

        /* For AH onward, configuration is per-PF. Find maximum of all
         * the currently enabled child VFs, and set the number to be that.
         */
        if (!QED_IS_BB(p_hwfn->cdev)) {
                qed_for_each_vf(p_hwfn, i) {
                        struct qed_vf_info *p_vf;

                        p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
                        if (!p_vf)
                                continue;

                        current_max = max_t(u8, current_max, p_vf->num_sbs);
                }
        }

        if (num_sbs > current_max)
                return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
                                              abs_vf_id, num_sbs);

        return 0;
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        int rc;

        /* It's possible VF was previously considered malicious -
         * clear the indication even if we're only going to disable VF.
         */
        vf->b_malicious = false;

        if (vf->to_disable)
                return 0;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "Enable internal access for vf %x [abs %x]\n",
                   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
                                           vf->abs_vf_id, vf->num_sbs);
        if (rc)
                return rc;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
        STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

        qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
                     p_hwfn->hw_info.hw_mode);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission zone table.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {valid (bit 8), vfid (bits 7:0)} - as inferred from the
 * value written below.
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf, u8 enable)
{
        u32 reg_addr, val;
        u16 qzone_id = 0;
        int qid;

        for (qid = 0; qid < vf->num_rxqs; qid++) {
                qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
                                &qzone_id);

                reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
                val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
                qed_wr(p_hwfn, p_ptt, reg_addr, val);
        }
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        /* Reset vf in IGU - interrupts are still disabled */
        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

        /* Permission Table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
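/* Map free IGU status blocks to the VF: each taken block is written into
 * the IGU mapping memory as a VF-owned line and its CAU configuration is
 * mirrored over DMAE. Returns the number of SBs actually mapped, which
 * may be less than requested if the free IOV pool runs short.
 */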
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, u16 num_rx_queues)
{
        struct qed_igu_block *p_block;
        struct cau_sb_entry sb_entry;
        int qid = 0;
        u32 val = 0;

        if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
                num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
        p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

        for (qid = 0; qid < num_rx_queues; qid++) {
                p_block = qed_get_igu_free_sb(p_hwfn, false);
                vf->igu_sbs[qid] = p_block->igu_sb_id;
                p_block->status &= ~QED_IGU_STATUS_FREE;
                SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

                qed_wr(p_hwfn, p_ptt,
                       IGU_REG_MAPPING_MEMORY +
                       sizeof(u32) * p_block->igu_sb_id, val);

                /* Configure the igu sb in CAU, now that it was marked valid */
                qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
                qed_dmae_host2grc(p_hwfn, p_ptt,
                                  (u64)(uintptr_t)&sb_entry,
                                  CAU_REG_SB_VAR_MEMORY +
                                  p_block->igu_sb_id * sizeof(u64), 2, 0);
        }

        vf->num_sbs = (u8) num_rx_queues;

        return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        int idx, igu_id;
        u32 addr, val;

        /* Invalidate igu CAM lines and mark them as free */
        for (idx = 0; idx < vf->num_sbs; idx++) {
                igu_id = vf->igu_sbs[idx];
                addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

                val = qed_rd(p_hwfn, p_ptt, addr);
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                qed_wr(p_hwfn, p_ptt, addr, val);

                p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
                p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
        }

        vf->num_sbs = 0;
}
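/* Mirror the PF's current link parameters, state and capabilities into a
 * VF's bulletin board, from where the VF reads its notion of link.
 */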
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
                             u16 vfid,
                             struct qed_mcp_link_params *params,
                             struct qed_mcp_link_state *link,
                             struct qed_mcp_link_capabilities *p_caps)
{
        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
                                                       vfid,
                                                       false);
        struct qed_bulletin_content *p_bulletin;

        if (!p_vf)
                return;

        p_bulletin = p_vf->bulletin.p_virt;
        p_bulletin->req_autoneg = params->speed.autoneg;
        p_bulletin->req_adv_speed = params->speed.advertised_speeds;
        p_bulletin->req_forced_speed = params->speed.forced_speed;
        p_bulletin->req_autoneg_pause = params->pause.autoneg;
        p_bulletin->req_forced_rx = params->pause.forced_rx;
        p_bulletin->req_forced_tx = params->pause.forced_tx;
        p_bulletin->req_loopback = params->loopback_mode;

        p_bulletin->link_up = link->link_up;
        p_bulletin->speed = link->speed;
        p_bulletin->full_duplex = link->full_duplex;
        p_bulletin->autoneg = link->an;
        p_bulletin->autoneg_complete = link->an_complete;
        p_bulletin->parallel_detection = link->parallel_detection;
        p_bulletin->pfc_enabled = link->pfc_enabled;
        p_bulletin->partner_adv_speed = link->partner_adv_speed;
        p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
        p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
        p_bulletin->partner_adv_pause = link->partner_adv_pause;
        p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

        p_bulletin->capability_speed = p_caps->speed_capabilities;
}
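/* Main flow for initializing HW for a VF: sanity-check the requested
 * queue zones, allocate IGU SBs, seed the bulletin with the current link
 * configuration and finally enable the VF's IGU/PGLUE access.
 */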
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_iov_vf_init_params *p_params)
{
        struct qed_mcp_link_capabilities link_caps;
        struct qed_mcp_link_params link_params;
        struct qed_mcp_link_state link_state;
        u8 num_of_vf_available_chains = 0;
        struct qed_vf_info *vf = NULL;
        u16 qid, num_irqs;
        int rc = 0;
        u32 cids;
        u8 i;

        vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
                return -EINVAL;
        }

        if (vf->b_init) {
                DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
                          p_params->rel_vf_id);
                return -EINVAL;
        }

        /* Perform sanity checking on the requested queue_id */
        for (i = 0; i < p_params->num_queues; i++) {
                u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
                u16 max_vf_qzone = min_vf_qzone +
                                   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

                qid = p_params->req_rx_queue[i];
                if (qid < min_vf_qzone || qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn,
                                  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
                                  qid,
                                  p_params->rel_vf_id,
                                  min_vf_qzone, max_vf_qzone);
                        return -EINVAL;
                }

                qid = p_params->req_tx_queue[i];
                if (qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn,
                                  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
                                  qid, p_params->rel_vf_id, max_vf_qzone);
                        return -EINVAL;
                }

                /* If client *really* wants, Tx qid can be shared with PF */
                if (qid < min_vf_qzone)
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_IOV,
                                   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
                                   p_params->rel_vf_id, qid, i);
        }

        /* Limit number of queues according to number of CIDs */
        qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
                   vf->relative_vf_id, p_params->num_queues, (u16)cids);
        num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

        num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
                                                              p_ptt,
                                                              vf, num_irqs);
        if (!num_of_vf_available_chains) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return -ENOMEM;
        }

        /* Choose queue number and index ranges */
        vf->num_rxqs = num_of_vf_available_chains;
        vf->num_txqs = num_of_vf_available_chains;

        for (i = 0; i < vf->num_rxqs; i++) {
                struct qed_vf_queue *p_queue = &vf->vf_queues[i];

                p_queue->fw_rx_qid = p_params->req_rx_queue[i];
                p_queue->fw_tx_qid = p_params->req_tx_queue[i];

                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
                           vf->relative_vf_id, i, vf->igu_sbs[i],
                           p_queue->fw_rx_qid, p_queue->fw_tx_qid);
        }

        /* Update the link configuration in bulletin */
        memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
               sizeof(link_params));
        memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
        memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
               sizeof(link_caps));
        qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
                         &link_params, &link_state, &link_caps);

        rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
        if (rc)
                return rc;

        vf->b_init = true;

        if (IS_LEAD_HWFN(p_hwfn))
                p_hwfn->cdev->p_iov_info->num_vfs++;

        return 0;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
        struct qed_mcp_link_capabilities caps;
        struct qed_mcp_link_params params;
        struct qed_mcp_link_state link;
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
                return -EINVAL;
        }

        if (vf->bulletin.p_virt)
                memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

        memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

        /* Get the link configuration back in bulletin so
         * that when VFs are re-enabled they get the actual
         * link configuration.
         */
        memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
        memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
        memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
        qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

        /* Forget the VF's acquisition message */
        memset(&vf->acquire, 0, sizeof(vf->acquire));

        /* disabling interrupts and resetting permission table was done during
         * vf-close, however, we could get here without going through vf_close
         */
        /* Disable Interrupts for VF */
        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

        /* Reset Permission table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

        vf->num_rxqs = 0;
        vf->num_txqs = 0;
        qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

        if (vf->b_init) {
                vf->b_init = false;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs--;
        }

        return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;

        /* Offset should keep pointing to next TLV (the end of the last) */
        *offset += length;

        /* Return a pointer to the start of the added tlv */
        return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
        u16 i = 1, total_length = 0;
        struct channel_tlv *tlv;

        do {
                tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

                /* output tlv */
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "TLV number %d: type %d, length %d\n",
                           i, tlv->type, tlv->length);

                if (tlv->type == CHANNEL_TLV_LIST_END)
                        return;

                /* Validate entry - protect against malicious VFs */
                if (!tlv->length) {
                        DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
                        return;
                }

                total_length += tlv->length;

                if (total_length >= sizeof(struct tlv_buffer_size)) {
                        DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
                        return;
                }

                i++;
        } while (1);
}
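/* Replies are DMAEd to the VF in two stages: first everything past the
 * leading 64 bits, then - after re-arming the channel-ready flag - the
 * leading 64 bits themselves, which carry the status. This ordering keeps
 * the VF from acting on a partially written reply.
 */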
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_vf_info *p_vf,
                                  u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct qed_dmae_params params;
        u8 eng_vf_id;

        mbx->reply_virt->default_resp.hdr.status = status;

        qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

        eng_vf_id = p_vf->abs_vf_id;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = QED_DMAE_FLAG_VF_DST;
        params.dst_vfid = eng_vf_id;

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
                           mbx->req_virt->first_tlv.reply_address +
                           sizeof(u64),
                           (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                           &params);

        /* Once PF copies the rc to the VF, the latter can continue
         * and send an additional message. So we have to make sure the
         * channel would be re-set to ready prior to that.
         */
        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
                           mbx->req_virt->first_tlv.reply_address,
                           sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
                                enum qed_iov_vport_update_flag flag)
{
        switch (flag) {
        case QED_IOV_VP_UPDATE_ACTIVATE:
                return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
        case QED_IOV_VP_UPDATE_VLAN_STRIP:
                return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
        case QED_IOV_VP_UPDATE_TX_SWITCH:
                return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
        case QED_IOV_VP_UPDATE_MCAST:
                return CHANNEL_TLV_VPORT_UPDATE_MCAST;
        case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
        case QED_IOV_VP_UPDATE_RSS:
                return CHANNEL_TLV_VPORT_UPDATE_RSS;
        case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
        case QED_IOV_VP_UPDATE_SGE_TPA:
                return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
        default:
                return 0;
        }
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
                                            struct qed_vf_info *p_vf,
                                            struct qed_iov_vf_mbx *p_mbx,
                                            u8 status,
                                            u16 tlvs_mask, u16 tlvs_accepted)
{
        struct pfvf_def_resp_tlv *resp;
        u16 size, total_len, i;

        memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
        p_mbx->offset = (u8 *)p_mbx->reply_virt;
        size = sizeof(struct pfvf_def_resp_tlv);
        total_len = size;

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

        /* Prepare response for all extended tlvs if they are found by PF */
        for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
                if (!(tlvs_mask & BIT(i)))
                        continue;

                resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
                                   qed_iov_vport_to_tlv(p_hwfn, i), size);

                if (tlvs_accepted & BIT(i))
                        resp->hdr.status = status;
                else
                        resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] - vport_update response: TLV %d, status %02x\n",
                           p_vf->relative_vf_id,
                           qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

                total_len += size;
        }

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_vf_info *vf_info,
                                 u16 type, u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

        mbx->offset = (u8 *)mbx->reply_virt;

        qed_add_tlv(p_hwfn, &mbx->offset, type, length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
        if (!vf)
                return NULL;

        return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
        struct qed_public_vf_info *vf_info;

        vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
        if (!vf_info)
                return;

        /* Clear the VF mac */
        eth_zero_addr(vf_info->mac);

        vf_info->rx_accept_mode = 0;
        vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
                               struct qed_vf_info *p_vf)
{
        u32 i, j;

        p_vf->vf_bulletin = 0;
        p_vf->vport_instance = 0;
        p_vf->configured_features = 0;

        /* If VF previously requested less resources, go back to default */
        p_vf->num_rxqs = p_vf->num_sbs;
        p_vf->num_txqs = p_vf->num_sbs;

        p_vf->num_active_rxqs = 0;

        for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
                struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

                for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
                        if (!p_queue->cids[j].p_cid)
                                continue;

                        qed_eth_queue_cid_release(p_hwfn,
                                                  p_queue->cids[j].p_cid);
                        p_queue->cids[j].p_cid = NULL;
                }
        }

        memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
        memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
        qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log(size) */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

        if (val)
                return val + 11;
        return 0;
}
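/* When the VF mapped a physical doorbell bar, clamp the number of CIDs it
 * is granted so that the doorbell of every CID falls inside that bar (or
 * inside the regview DQ window for VFs without a physical bar).
 */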
static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_vf_info *p_vf,
                                 struct vf_pf_resc_request *p_req,
                                 struct pf_vf_resc *p_resp)
{
        u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
        u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
                     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
        u32 bar_size;

        p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

        /* If VF didn't bother asking for QIDs then don't bother limiting
         * number of CIDs. The VF doesn't care about the number, and this
         * has the likely result of causing an additional acquisition.
         */
        if (!(p_vf->acquire.vfdev_info.capabilities &
              VFPF_ACQUIRE_CAP_QUEUE_QIDS))
                return;

        /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
         * that would make sure doorbells for all CIDs fall within the bar.
         * If it doesn't, make sure regview window is sufficient.
         */
        if (p_vf->acquire.vfdev_info.capabilities &
            VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
                bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
                if (bar_size)
                        bar_size = 1 << bar_size;

                if (p_hwfn->cdev->num_hwfns > 1)
                        bar_size /= 2;
        } else {
                bar_size = PXP_VF_BAR0_DQ_LENGTH;
        }

        if (bar_size / db_size < 256)
                p_resp->num_cids = min_t(u8, p_resp->num_cids,
                                         (u8)(bar_size / db_size));
}

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *p_vf,
                                      struct vf_pf_resc_request *p_req,
                                      struct pf_vf_resc *p_resp)
{
        u8 i;

        /* Queue related information */
        p_resp->num_rxqs = p_vf->num_rxqs;
        p_resp->num_txqs = p_vf->num_txqs;
        p_resp->num_sbs = p_vf->num_sbs;

        for (i = 0; i < p_resp->num_sbs; i++) {
                p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
                p_resp->hw_sbs[i].sb_qid = 0;
        }

        /* These fields are filled for backward compatibility.
         * Unused by modern vfs.
         */
        for (i = 0; i < p_resp->num_rxqs; i++) {
                qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
                                (u16 *)&p_resp->hw_qid[i]);
                p_resp->cid[i] = i;
        }

        /* Filter related information */
        p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
                                        p_req->num_mac_filters);
        p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
                                         p_req->num_vlan_filters);

        qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

        /* This isn't really needed/enforced, but some legacy VFs might depend
         * on the correct filling of this field.
         */
        p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

        /* Validate sufficient resources for VF */
        if (p_resp->num_rxqs < p_req->num_rxqs ||
            p_resp->num_txqs < p_req->num_txqs ||
            p_resp->num_sbs < p_req->num_sbs ||
            p_resp->num_mac_filters < p_req->num_mac_filters ||
            p_resp->num_vlan_filters < p_req->num_vlan_filters ||
            p_resp->num_mc_filters < p_req->num_mc_filters ||
            p_resp->num_cids < p_req->num_cids) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
                           p_vf->abs_vf_id,
                           p_req->num_rxqs,
                           p_resp->num_rxqs,
                           p_req->num_txqs,
                           p_resp->num_txqs,
                           p_req->num_sbs,
                           p_resp->num_sbs,
                           p_req->num_mac_filters,
                           p_resp->num_mac_filters,
                           p_req->num_vlan_filters,
                           p_resp->num_vlan_filters,
                           p_req->num_mc_filters,
                           p_resp->num_mc_filters,
                           p_req->num_cids, p_resp->num_cids);

                /* Some legacy OSes are incapable of correctly handling this
                 * failure.
                 */
                if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
                     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
                    (p_vf->acquire.vfdev_info.os_type ==
                     VFPF_ACQUIRE_OS_WINDOWS))
                        return PFVF_STATUS_SUCCESS;

                return PFVF_STATUS_NO_RESOURCE;
        }

        return PFVF_STATUS_SUCCESS;
}

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
                                         struct pfvf_stats_info *p_stats)
{
        p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
                                  offsetof(struct mstorm_vf_zone,
                                           non_trigger.eth_queue_stat);
        p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
        p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
                                  offsetof(struct ustorm_vf_zone,
                                           non_trigger.eth_queue_stat);
        p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
        p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
                                  offsetof(struct pstorm_vf_zone,
                                           non_trigger.eth_queue_stat);
        p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
        p_stats->tstats.address = 0;
        p_stats->tstats.len = 0;
}
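/* Handle the VF's ACQUIRE message - the opening step of the PF/VF
 * handshake. The PF validates HSI/driver compatibility, stores the
 * request, fills in device info and granted resources, starts the VF in
 * firmware and posts the first bulletin.
 */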
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf)
{
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
        struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
        struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
        u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
        struct pf_vf_resc *resc = &resp->resc;
        int rc;

        memset(resp, 0, sizeof(*resp));

        /* Write the PF version so that VF would know which version
         * is supported - might be later overridden. This guarantees that
         * VF could recognize legacy PF based on lack of versions in reply.
         */
        pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
        pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

        if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
                           vf->abs_vf_id, vf->state);
                goto out;
        }

        /* Validate FW compatibility */
        if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
                if (req->vfdev_info.capabilities &
                    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
                        struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "VF[%d] is pre-fastpath HSI\n",
                                   vf->abs_vf_id);
                        p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
                        p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
                } else {
                        DP_INFO(p_hwfn,
                                "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
                                vf->abs_vf_id,
                                req->vfdev_info.eth_fp_hsi_major,
                                req->vfdev_info.eth_fp_hsi_minor,
                                ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

                        goto out;
                }
        }

        /* On 100g PFs, prevent old VFs from loading */
        if ((p_hwfn->cdev->num_hwfns > 1) &&
            !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
                DP_INFO(p_hwfn,
                        "VF[%d] is running an old driver that doesn't support 100g\n",
                        vf->abs_vf_id);
                goto out;
        }

        /* Store the acquire message */
        memcpy(&vf->acquire, req, sizeof(vf->acquire));

        vf->opaque_fid = req->vfdev_info.opaque_fid;

        vf->vf_bulletin = req->bulletin_addr;
        vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
                            vf->bulletin.size : req->bulletin_size;

        /* fill in pfdev info */
        pfdev_info->chip_num = p_hwfn->cdev->chip_num;
        pfdev_info->db_size = 0;
        pfdev_info->indices_per_sb = PIS_PER_SB;

        pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
                                   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
        if (p_hwfn->cdev->num_hwfns > 1)
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

        /* Share our ability to use multiple queue-ids only with VFs
         * that request it.
         */
        if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

        /* Share the sizes of the bars with VF */
        resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

        qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

        memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

        pfdev_info->fw_major = FW_MAJOR_VERSION;
        pfdev_info->fw_minor = FW_MINOR_VERSION;
        pfdev_info->fw_rev = FW_REVISION_VERSION;
        pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

        /* Incorrect when legacy, but doesn't matter as legacy isn't reading
         * this information.
         */
        pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
                                         req->vfdev_info.eth_fp_hsi_minor);
        pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
        qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

        pfdev_info->dev_type = p_hwfn->cdev->type;
        pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

        /* Fill resources available to VF; Make sure there are enough to
         * satisfy the VF's request.
         */
        vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
                                                  &req->resc_request, resc);
        if (vfpf_status != PFVF_STATUS_SUCCESS)
                goto out;

        /* Start the VF in FW */
        rc = qed_sp_vf_start(p_hwfn, vf);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
                vfpf_status = PFVF_STATUS_FAILURE;
                goto out;
        }

        /* Fill agreed size of bulletin board in response */
        resp->bulletin_size = vf->bulletin.size;
        qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
                   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
                   vf->abs_vf_id,
                   resp->pfdev_info.chip_num,
                   resp->pfdev_info.db_size,
                   resp->pfdev_info.indices_per_sb,
                   resp->pfdev_info.capabilities,
                   resc->num_rxqs,
                   resc->num_txqs,
                   resc->num_sbs,
                   resc->num_mac_filters,
                   resc->num_vlan_filters);

        vf->state = VF_ACQUIRED;

out:
        /* Prepare Response */
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
                             sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
                                  struct qed_vf_info *p_vf, bool val)
{
        struct qed_sp_vport_update_params params;
        int rc;

        if (val == p_vf->spoof_chk) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Spoofchk value[%d] is already configured\n", val);
                return 0;
        }

        memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
        params.opaque_fid = p_vf->opaque_fid;
        params.vport_id = p_vf->vport_id;
        params.update_anti_spoofing_en_flg = 1;
        params.anti_spoofing_en = val;

        rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
        if (!rc) {
                p_vf->spoof_chk = val;
                p_vf->req_spoofchk_val = p_vf->spoof_chk;
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Spoofchk val[%d] configured\n", val);
        } else {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Spoofchk configuration[val:%d] failed for VF[%d]\n",
                           val, p_vf->relative_vf_id);
        }

        return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
                                            struct qed_vf_info *p_vf)
{
        struct qed_filter_ucast filter;
        int rc = 0;
        int i;

        memset(&filter, 0, sizeof(filter));
        filter.is_rx_filter = 1;
        filter.is_tx_filter = 1;
        filter.vport_to_add_to = p_vf->vport_id;
        filter.opcode = QED_FILTER_ADD;

        /* Reconfigure vlans */
        for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
                if (!p_vf->shadow_config.vlans[i].used)
                        continue;

                filter.type = QED_FILTER_VLAN;
                filter.vlan = p_vf->shadow_config.vlans[i].vid;
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
                           filter.vlan, p_vf->relative_vf_id);
                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
                                             &filter, QED_SPQ_MODE_CB, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to configure VLAN [%04x] to VF [%04x]\n",
                                  filter.vlan, p_vf->relative_vf_id);
                        break;
                }
        }

        return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
                                   struct qed_vf_info *p_vf, u64 events)
{
        int rc = 0;

        if ((events & BIT(VLAN_ADDR_FORCED)) &&
            !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
                rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

        return rc;
}
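/* Apply hypervisor-forced MAC/VLAN settings (published via the bulletin)
 * to the VF's vport; when forced features are removed, the VF's own
 * shadow configuration is replayed instead.
 */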
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
                                          struct qed_vf_info *p_vf, u64 events)
{
        int rc = 0;
        struct qed_filter_ucast filter;

        if (!p_vf->vport_instance)
                return -EINVAL;

        if (events & BIT(MAC_ADDR_FORCED)) {
                /* Since there's no way [currently] of removing the MAC,
                 * we can always assume this means we need to force it.
                 */
                memset(&filter, 0, sizeof(filter));
                filter.type = QED_FILTER_MAC;
                filter.opcode = QED_FILTER_REPLACE;
                filter.is_rx_filter = 1;
                filter.is_tx_filter = 1;
                filter.vport_to_add_to = p_vf->vport_id;
                ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
                                             &filter, QED_SPQ_MODE_CB, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "PF failed to configure MAC for VF\n");
                        return rc;
                }

                p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
        }

        if (events & BIT(VLAN_ADDR_FORCED)) {
                struct qed_sp_vport_update_params vport_update;
                u8 removal;
                int i;

                memset(&filter, 0, sizeof(filter));
                filter.type = QED_FILTER_VLAN;
                filter.is_rx_filter = 1;
                filter.is_tx_filter = 1;
                filter.vport_to_add_to = p_vf->vport_id;
                filter.vlan = p_vf->bulletin.p_virt->pvid;
                filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
                                              QED_FILTER_FLUSH;

                /* Send the ramrod */
                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
                                             &filter, QED_SPQ_MODE_CB, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "PF failed to configure VLAN for VF\n");
                        return rc;
                }

                /* Update the default-vlan & silent vlan stripping */
                memset(&vport_update, 0, sizeof(vport_update));
                vport_update.opaque_fid = p_vf->opaque_fid;
                vport_update.vport_id = p_vf->vport_id;
                vport_update.update_default_vlan_enable_flg = 1;
                vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
                vport_update.update_default_vlan_flg = 1;
                vport_update.default_vlan = filter.vlan;

                vport_update.update_inner_vlan_removal_flg = 1;
                removal = filter.vlan ? 1
                                      : p_vf->shadow_config.inner_vlan_removal;
                vport_update.inner_vlan_removal_flg = removal;
                vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
                rc = qed_sp_vport_update(p_hwfn,
                                         &vport_update,
                                         QED_SPQ_MODE_EBLOCK, NULL);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "PF failed to configure VF vport for vlan\n");
                        return rc;
                }

                /* Update all the Rx queues */
                for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
                        struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
                        struct qed_queue_cid *p_cid = NULL;

                        /* There can be at most 1 Rx queue on qzone. Find it */
                        p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
                        if (!p_cid)
                                continue;

                        rc = qed_sp_eth_rx_queues_update(p_hwfn,
                                                         (void **)&p_cid,
                                                         1, 0, 1,
                                                         QED_SPQ_MODE_EBLOCK,
                                                         NULL);
                        if (rc) {
                                DP_NOTICE(p_hwfn,
                                          "Failed to send Rx update for queue[0x%04x]\n",
                                          p_cid->rel.queue_id);
                                return rc;
                        }
                }

                if (filter.vlan)
                        p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
                else
                        p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
        }

        /* If forced features are terminated, we need to configure the shadow
         * configuration back again.
         */
        if (events)
                qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

        return rc;
}

static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       struct qed_vf_info *vf)
{
        struct qed_sp_vport_start_params params = { 0 };
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct vfpf_vport_start_tlv *start;
        u8 status = PFVF_STATUS_SUCCESS;
        struct qed_vf_info *vf_info;
        u64 *p_bitmap;
        int sb_id;
        int rc;

        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
        if (!vf_info) {
                DP_NOTICE(p_hwfn->cdev,
                          "Failed to get VF info, invalid vfid [%d]\n",
                          vf->relative_vf_id);
                return;
        }

        vf->state = VF_ENABLED;
        start = &mbx->req_virt->start_vport;

        qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

        /* Initialize Status block in CAU */
        for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
                if (!start->sb_addr[sb_id]) {
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "VF[%d] did not fill the address of SB %d\n",
                                   vf->relative_vf_id, sb_id);
                        break;
                }

                qed_int_cau_conf_sb(p_hwfn, p_ptt,
                                    start->sb_addr[sb_id],
                                    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
        }

        vf->mtu = start->mtu;
        vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

        /* Take into consideration configuration forced by hypervisor;
         * If none is configured, use the supplied VF values [for old
         * vfs that would still be fine, since they passed '0' as padding].
         */
        p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
        if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
                u8 vf_req = start->only_untagged;

                vf_info->bulletin.p_virt->default_only_untagged = vf_req;
                *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
        }

        params.tpa_mode = start->tpa_mode;
        params.remove_inner_vlan = start->inner_vlan_removal;
        params.tx_switching = true;

        params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
        params.drop_ttl0 = false;
        params.concrete_fid = vf->concrete_fid;
        params.opaque_fid = vf->opaque_fid;
        params.vport_id = vf->vport_id;
        params.max_buffers_per_cqe = start->max_buffers_per_cqe;
        params.mtu = vf->mtu;

        /* Non trusted VFs should enable control frame filtering */
        params.check_mac = !vf->p_vf_info.is_trusted_configured;

        rc = qed_sp_eth_vport_start(p_hwfn, &params);
        if (rc) {
                DP_ERR(p_hwfn,
                       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
                status = PFVF_STATUS_FAILURE;
        } else {
                vf->vport_instance++;

                /* Force configuration if needed on the newly opened vport */
                qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

                __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
        }

        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
                             sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        u8 status = PFVF_STATUS_SUCCESS;
        int rc;

        vf->vport_instance--;
        vf->spoof_chk = false;

        if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
            (qed_iov_validate_active_txq(p_hwfn, vf))) {
                vf->b_malicious = true;
                DP_NOTICE(p_hwfn,
                          "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
                          vf->abs_vf_id);
                status = PFVF_STATUS_MALICIOUS;
                goto out;
        }

        rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
        if (rc) {
                DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
                       rc);
                status = PFVF_STATUS_FAILURE;
        }

        /* Forget the configuration on the vport */
        vf->configured_features = 0;
        memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
                             sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
                                          struct qed_vf_info *vf,
                                          u8 status, bool b_legacy)
{
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct pfvf_start_queue_resp_tlv *p_tlv;
        struct vfpf_start_rxq_tlv *req;
        u16 length;

        mbx->offset = (u8 *)mbx->reply_virt;

        /* Taking a bigger struct instead of adding a TLV to list was a
         * mistake, but one which we're now stuck with, as some older
         * clients assume the size of the previous response.
         */
        if (!b_legacy)
                length = sizeof(*p_tlv);
        else
                length = sizeof(struct pfvf_def_resp_tlv);

        p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
                            length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        /* Update the TLV with the response */
        if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
                req = &mbx->req_virt->start_rxq;
                p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
                                offsetof(struct mstorm_vf_zone,
                                         non_trigger.eth_rx_queue_producers) +
                                sizeof(struct eth_rx_prod_data) * req->rx_qid;
        }

        qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
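/* Extract the queue-cid index for a queue-related request. VFs that
 * negotiated the QUEUE_QIDS capability must provide an explicit qid TLV;
 * legacy VFs implicitly use the fixed legacy Rx/Tx indices.
 */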
static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
                             struct qed_vf_info *p_vf, bool b_is_tx)
{
        struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
        struct vfpf_qid_tlv *p_qid_tlv;

        /* Search for the qid if the VF published it's going to provide it */
        if (!(p_vf->acquire.vfdev_info.capabilities &
              VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
                if (b_is_tx)
                        return QED_IOV_LEGACY_QID_TX;
                else
                        return QED_IOV_LEGACY_QID_RX;
        }

        p_qid_tlv = (struct vfpf_qid_tlv *)
                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
                                             CHANNEL_TLV_QID);
        if (!p_qid_tlv) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%2x]: Failed to provide qid\n",
                           p_vf->relative_vf_id);

                return QED_IOV_QID_INVALID;
        }

        if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%02x]: Provided qid out-of-bounds %02x\n",
                           p_vf->relative_vf_id, p_qid_tlv->qid);
                return QED_IOV_QID_INVALID;
        }

        return p_qid_tlv->qid;
}
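/* Start an Rx queue on behalf of a VF: validate the request against the
 * VF's resources, acquire a queue-cid, clear the Rx producer for modern
 * VFs (legacy VFs clean their own, differently located, producer) and
 * finally ramrod the queue into firmware.
 */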
2098 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
2099 struct qed_ptt *p_ptt,
2100 struct qed_vf_info *vf)
2102 struct qed_queue_start_common_params params;
2103 struct qed_queue_cid_vf_params vf_params;
2104 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2105 u8 status = PFVF_STATUS_NO_RESOURCE;
2106 u8 qid_usage_idx, vf_legacy = 0;
2107 struct vfpf_start_rxq_tlv *req;
2108 struct qed_vf_queue *p_queue;
2109 struct qed_queue_cid *p_cid;
2110 struct qed_sb_info sb_dummy;
2113 req = &mbx->req_virt->start_rxq;
2115 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2116 QED_IOV_VALIDATE_Q_DISABLE) ||
2117 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2120 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2121 if (qid_usage_idx == QED_IOV_QID_INVALID)
2124 p_queue = &vf->vf_queues[req->rx_qid];
2125 if (p_queue->cids[qid_usage_idx].p_cid)
2128 vf_legacy = qed_vf_calculate_legacy(vf);
2130 /* Acquire a new queue-cid */
2131 memset(¶ms, 0, sizeof(params));
2132 params.queue_id = p_queue->fw_rx_qid;
2133 params.vport_id = vf->vport_id;
2134 params.stats_id = vf->abs_vf_id + 0x10;
2135 /* Since IGU index is passed via sb_info, construct a dummy one */
2136 memset(&sb_dummy, 0, sizeof(sb_dummy));
2137 sb_dummy.igu_sb_id = req->hw_sb;
2138 params.p_sb = &sb_dummy;
2139 params.sb_idx = req->sb_index;
2141 memset(&vf_params, 0, sizeof(vf_params));
2142 vf_params.vfid = vf->relative_vf_id;
2143 vf_params.vf_qid = (u8)req->rx_qid;
2144 vf_params.vf_legacy = vf_legacy;
2145 vf_params.qid_usage_idx = qid_usage_idx;
2146 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2147 ¶ms, true, &vf_params);
2151 /* Legacy VFs have their Producers in a different location, which they
2152 * calculate on their own and clean the producer prior to this.
2154 if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
2155 REG_WR(p_hwfn,
2156 GTT_BAR0_MAP_REG_MSDM_RAM +
2157 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2158 0);
2160 rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
2161 req->bd_max_bytes,
2162 req->rxq_addr,
2163 req->cqe_pbl_addr, req->cqe_pbl_size);
2164 if (rc) {
2165 status = PFVF_STATUS_FAILURE;
2166 qed_eth_queue_cid_release(p_hwfn, p_cid);
2167 } else {
2168 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2169 p_queue->cids[qid_usage_idx].b_is_tx = false;
2170 status = PFVF_STATUS_SUCCESS;
2171 vf->num_active_rxqs++;
2175 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2177 QED_QCID_LEGACY_VF_RX_PROD));
2181 qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2182 struct qed_tunnel_info *p_tun,
2183 u16 tunn_feature_mask)
2185 p_resp->tunn_feature_mask = tunn_feature_mask;
2186 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2187 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2188 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2189 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2190 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2191 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2192 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2193 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2194 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2195 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2196 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2197 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2201 __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2202 struct qed_tunn_update_type *p_tun,
2203 enum qed_tunn_mode mask, u8 tun_cls)
2205 if (p_req->tun_mode_update_mask & BIT(mask)) {
2206 p_tun->b_update_mode = true;
2208 if (p_req->tunn_mode & BIT(mask))
2209 p_tun->b_mode_enabled = true;
2212 p_tun->tun_cls = tun_cls;
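/* Example of the mask semantics above: a VF enabling VXLAN sets
 * BIT(QED_MODE_VXLAN_TUNN) in both tun_mode_update_mask and tunn_mode;
 * setting the bit only in the update mask requests that the mode be
 * disabled, since b_mode_enabled is then left false.
 */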
2216 qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2217 struct qed_tunn_update_type *p_tun,
2218 struct qed_tunn_update_udp_port *p_port,
2219 enum qed_tunn_mode mask,
2220 u8 tun_cls, u8 update_port, u16 port)
2222 if (update_port) {
2223 p_port->b_update_port = true;
2224 p_port->port = port;
2227 __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2231 qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2233 bool b_update_requested = false;
2235 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2236 p_req->update_geneve_port || p_req->update_vxlan_port)
2237 b_update_requested = true;
2239 return b_update_requested;
2242 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2244 if (tun->b_update_mode && !tun->b_mode_enabled) {
2245 tun->b_update_mode = false;
2246 *rc = -EINVAL;
2251 qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2252 u16 *tun_features, bool *update,
2253 struct qed_tunnel_info *tun_src)
2255 struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2256 struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2257 u16 bultn_vxlan_port, bultn_geneve_port;
2258 void *cookie = p_hwfn->cdev->ops_cookie;
2261 *tun_features = p_hwfn->cdev->tunn_feature_mask;
2262 bultn_vxlan_port = tun->vxlan_port.port;
2263 bultn_geneve_port = tun->geneve_port.port;
2264 qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
2265 qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
2266 qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
2267 qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
2268 qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2270 if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
2271 (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2272 tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2273 tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2274 tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2275 tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
2276 tun_src->b_update_rx_cls = false;
2277 tun_src->b_update_tx_cls = false;
2281 if (tun_src->vxlan_port.b_update_port) {
2282 if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
2283 tun_src->vxlan_port.b_update_port = false;
2284 } else {
2285 *update = true;
2286 bultn_vxlan_port = tun_src->vxlan_port.port;
2290 if (tun_src->geneve_port.b_update_port) {
2291 if (tun_src->geneve_port.port == tun->geneve_port.port) {
2292 tun_src->geneve_port.b_update_port = false;
2293 } else {
2294 *update = true;
2295 bultn_geneve_port = tun_src->geneve_port.port;
2299 qed_for_each_vf(p_hwfn, i) {
2300 qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
2304 qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2305 ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
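/* By this point the (possibly trimmed) tunnel configuration has been
 * mirrored into every VF bulletin and reported to the upper layer via
 * ports_update(); rc is non-zero if any requested mode was rejected.
 */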
2310 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2311 struct qed_ptt *p_ptt,
2312 struct qed_vf_info *p_vf)
2314 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2315 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2316 struct pfvf_update_tunn_param_tlv *p_resp;
2317 struct vfpf_update_tunn_param_tlv *p_req;
2318 u8 status = PFVF_STATUS_SUCCESS;
2319 bool b_update_required = false;
2320 struct qed_tunnel_info tunn;
2321 u16 tunn_feature_mask = 0;
2324 mbx->offset = (u8 *)mbx->reply_virt;
2326 memset(&tunn, 0, sizeof(tunn));
2327 p_req = &mbx->req_virt->tunn_param_update;
2329 if (!qed_iov_pf_validate_tunn_param(p_req)) {
2330 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2331 "No tunnel update requested by VF\n");
2332 status = PFVF_STATUS_FAILURE;
2336 tunn.b_update_rx_cls = p_req->update_tun_cls;
2337 tunn.b_update_tx_cls = p_req->update_tun_cls;
2339 qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2340 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2341 p_req->update_vxlan_port,
2343 qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2344 QED_MODE_L2GENEVE_TUNN,
2345 p_req->l2geneve_clss,
2346 p_req->update_geneve_port,
2347 p_req->geneve_port);
2348 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2349 QED_MODE_IPGENEVE_TUNN,
2350 p_req->ipgeneve_clss);
2351 __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2352 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2353 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2354 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2356 /* Even if the PF modifies the VF's request, it should still
2357 * return an error for a partial configuration or for a
2358 * configuration that differs from the one requested.
2359 */
2360 rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2361 &b_update_required, &tunn);
2364 status = PFVF_STATUS_FAILURE;
2366 /* Does the QED client want to update anything? */
2367 if (b_update_required) {
2370 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2371 QED_SPQ_MODE_EBLOCK, NULL);
2373 status = PFVF_STATUS_FAILURE;
2375 geneve_port = p_tun->geneve_port.port;
2376 qed_for_each_vf(p_hwfn, i) {
2377 qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2378 p_tun->vxlan_port.port,
2384 p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2385 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2387 qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2388 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2389 sizeof(struct channel_list_end_tlv));
2391 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2394 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2395 struct qed_ptt *p_ptt,
2396 struct qed_vf_info *p_vf,
2399 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2400 struct pfvf_start_queue_resp_tlv *p_tlv;
2401 bool b_legacy = false;
2404 mbx->offset = (u8 *)mbx->reply_virt;
2406 /* Taking a bigger struct instead of adding a TLV to the list was a
2407 * mistake, but one which we're now stuck with, as some older
2408 * clients assume the size of the previous response.
2410 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2411 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2415 length = sizeof(*p_tlv);
2417 length = sizeof(struct pfvf_def_resp_tlv);
2419 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2421 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2422 sizeof(struct channel_list_end_tlv));
2424 /* Update the TLV with the response */
2425 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2426 p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
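/* Unlike the Rx response, which returns a producer offset, the Tx
 * response hands the VF its doorbell address, derived here from the
 * queue's CID.
 */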
2428 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2431 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2432 struct qed_ptt *p_ptt,
2433 struct qed_vf_info *vf)
2435 struct qed_queue_start_common_params params;
2436 struct qed_queue_cid_vf_params vf_params;
2437 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2438 u8 status = PFVF_STATUS_NO_RESOURCE;
2439 struct vfpf_start_txq_tlv *req;
2440 struct qed_vf_queue *p_queue;
2441 struct qed_queue_cid *p_cid;
2442 struct qed_sb_info sb_dummy;
2443 u8 qid_usage_idx, vf_legacy;
2448 memset(&params, 0, sizeof(params));
2449 req = &mbx->req_virt->start_txq;
2451 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2452 QED_IOV_VALIDATE_Q_NA) ||
2453 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2456 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2457 if (qid_usage_idx == QED_IOV_QID_INVALID)
2460 p_queue = &vf->vf_queues[req->tx_qid];
2461 if (p_queue->cids[qid_usage_idx].p_cid)
2464 vf_legacy = qed_vf_calculate_legacy(vf);
2466 /* Acquire a new queue-cid */
2467 params.queue_id = p_queue->fw_tx_qid;
2468 params.vport_id = vf->vport_id;
2469 params.stats_id = vf->abs_vf_id + 0x10;
2471 /* Since IGU index is passed via sb_info, construct a dummy one */
2472 memset(&sb_dummy, 0, sizeof(sb_dummy));
2473 sb_dummy.igu_sb_id = req->hw_sb;
2474 params.p_sb = &sb_dummy;
2475 params.sb_idx = req->sb_index;
2477 memset(&vf_params, 0, sizeof(vf_params));
2478 vf_params.vfid = vf->relative_vf_id;
2479 vf_params.vf_qid = (u8)req->tx_qid;
2480 vf_params.vf_legacy = vf_legacy;
2481 vf_params.qid_usage_idx = qid_usage_idx;
2483 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2484 &params, false, &vf_params);
2488 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2489 rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
2490 req->pbl_addr, req->pbl_size, pq);
2491 if (rc) {
2492 status = PFVF_STATUS_FAILURE;
2493 qed_eth_queue_cid_release(p_hwfn, p_cid);
2494 } else {
2495 status = PFVF_STATUS_SUCCESS;
2496 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2497 p_queue->cids[qid_usage_idx].b_is_tx = true;
2502 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
2505 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2506 struct qed_vf_info *vf,
2508 u8 qid_usage_idx, bool cqe_completion)
2510 struct qed_vf_queue *p_queue;
2513 if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
2516 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2517 vf->relative_vf_id, rxq_id, qid_usage_idx);
2521 p_queue = &vf->vf_queues[rxq_id];
2523 /* We've validated the index and the existence of the active RXQ -
2524 * now we need to make sure that it's using the correct qid.
2526 if (!p_queue->cids[qid_usage_idx].p_cid ||
2527 p_queue->cids[qid_usage_idx].b_is_tx) {
2528 struct qed_queue_cid *p_cid;
2530 p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
2533 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2535 rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
2539 /* Now that we know we have a valid Rx-queue - close it */
2540 rc = qed_eth_rx_queue_stop(p_hwfn,
2541 p_queue->cids[qid_usage_idx].p_cid,
2542 false, cqe_completion);
2546 p_queue->cids[qid_usage_idx].p_cid = NULL;
2547 vf->num_active_rxqs--;
2552 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2553 struct qed_vf_info *vf,
2554 u16 txq_id, u8 qid_usage_idx)
2556 struct qed_vf_queue *p_queue;
2559 if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2562 p_queue = &vf->vf_queues[txq_id];
2563 if (!p_queue->cids[qid_usage_idx].p_cid ||
2564 !p_queue->cids[qid_usage_idx].b_is_tx)
2567 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2571 p_queue->cids[qid_usage_idx].p_cid = NULL;
2575 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2576 struct qed_ptt *p_ptt,
2577 struct qed_vf_info *vf)
2579 u16 length = sizeof(struct pfvf_def_resp_tlv);
2580 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2581 u8 status = PFVF_STATUS_FAILURE;
2582 struct vfpf_stop_rxqs_tlv *req;
2586 /* There has never been an official driver that used this interface
2587 * for stopping multiple queues, and it is now considered deprecated.
2588 * Validate this isn't used here.
2590 req = &mbx->req_virt->stop_rxqs;
2591 if (req->num_rxqs != 1) {
2592 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2593 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2594 vf->relative_vf_id);
2595 status = PFVF_STATUS_NOT_SUPPORTED;
2599 /* Find which qid-index is associated with the queue */
2600 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2601 if (qid_usage_idx == QED_IOV_QID_INVALID)
2604 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2605 qid_usage_idx, req->cqe_completion);
2607 status = PFVF_STATUS_SUCCESS;
2609 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2613 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2614 struct qed_ptt *p_ptt,
2615 struct qed_vf_info *vf)
2617 u16 length = sizeof(struct pfvf_def_resp_tlv);
2618 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2619 u8 status = PFVF_STATUS_FAILURE;
2620 struct vfpf_stop_txqs_tlv *req;
2624 /* There has never been an official driver that used this interface
2625 * for stopping multiple queues, and it is now considered deprecated.
2626 * Validate this isn't used here.
2628 req = &mbx->req_virt->stop_txqs;
2629 if (req->num_txqs != 1) {
2630 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2631 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2632 vf->relative_vf_id);
2633 status = PFVF_STATUS_NOT_SUPPORTED;
2637 /* Find which qid-index is associated with the queue */
2638 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2639 if (qid_usage_idx == QED_IOV_QID_INVALID)
2642 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
2644 status = PFVF_STATUS_SUCCESS;
2647 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2651 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2652 struct qed_ptt *p_ptt,
2653 struct qed_vf_info *vf)
2655 struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2656 u16 length = sizeof(struct pfvf_def_resp_tlv);
2657 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2658 struct vfpf_update_rxq_tlv *req;
2659 u8 status = PFVF_STATUS_FAILURE;
2660 u8 complete_event_flg;
2661 u8 complete_cqe_flg;
2666 req = &mbx->req_virt->update_rxq;
2667 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2668 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2670 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2671 if (qid_usage_idx == QED_IOV_QID_INVALID)
2674 /* There shouldn't exist a VF that uses queue-qids yet uses this
2675 * API with multiple Rx queues. Validate this.
2677 if ((vf->acquire.vfdev_info.capabilities &
2678 VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
2679 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2680 "VF[%d] supports QIDs but sends multiple queues\n",
2681 vf->relative_vf_id);
2685 /* Validate inputs - for the legacy case this is still true since
2686 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2688 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2689 if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2690 QED_IOV_VALIDATE_Q_NA) ||
2691 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2692 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2693 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2694 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2695 vf->relative_vf_id, req->rx_qid,
2701 /* Prepare the handlers */
2702 for (i = 0; i < req->num_rxqs; i++) {
2703 u16 qid = req->rx_qid + i;
2705 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2708 rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2712 QED_SPQ_MODE_EBLOCK, NULL);
2716 status = PFVF_STATUS_SUCCESS;
2718 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2722 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2723 void *p_tlvs_list, u16 req_type)
2725 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2729 if (!p_tlv->length) {
2730 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2734 if (p_tlv->type == req_type) {
2735 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2736 "Extended tlv type %d, length %d found\n",
2737 p_tlv->type, p_tlv->length);
2738 return p_tlv;
2739 }
2741 len += p_tlv->length;
2742 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2744 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2745 DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2748 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
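/* The walk above depends on every TLV carrying a non-zero length; a
 * zero length or a cumulative length beyond TLV_BUFFER_SIZE aborts the
 * search, protecting the PF from malformed VF requests.
 */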
2754 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2755 struct qed_sp_vport_update_params *p_data,
2756 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2758 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2759 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2761 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2762 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2766 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2767 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2768 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2769 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2770 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2774 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2775 struct qed_sp_vport_update_params *p_data,
2776 struct qed_vf_info *p_vf,
2777 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2779 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2780 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2782 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2783 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2787 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2789 /* Ignore the VF request if we're forcing a vlan */
2790 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2791 p_data->update_inner_vlan_removal_flg = 1;
2792 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2795 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2799 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2800 struct qed_sp_vport_update_params *p_data,
2801 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2803 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2804 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2806 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2807 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2809 if (!p_tx_switch_tlv)
2812 p_data->update_tx_switching_flg = 1;
2813 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2814 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2818 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2819 struct qed_sp_vport_update_params *p_data,
2820 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2822 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2823 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2825 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2826 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2830 p_data->update_approx_mcast_flg = 1;
2831 memcpy(p_data->bins, p_mcast_tlv->bins,
2832 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2833 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2837 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2838 struct qed_sp_vport_update_params *p_data,
2839 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2841 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2842 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2843 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2845 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2846 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2850 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2851 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2852 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2853 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2854 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2858 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2859 struct qed_sp_vport_update_params *p_data,
2860 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2862 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2863 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2865 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2866 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2868 if (!p_accept_any_vlan)
2871 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2872 p_data->update_accept_any_vlan_flg =
2873 p_accept_any_vlan->update_accept_any_vlan_flg;
2874 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2878 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2879 struct qed_vf_info *vf,
2880 struct qed_sp_vport_update_params *p_data,
2881 struct qed_rss_params *p_rss,
2882 struct qed_iov_vf_mbx *p_mbx,
2883 u16 *tlvs_mask, u16 *tlvs_accepted)
2885 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2886 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2887 bool b_reject = false;
2891 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2892 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2894 p_data->rss_params = NULL;
2898 memset(p_rss, 0, sizeof(struct qed_rss_params));
2900 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2901 VFPF_UPDATE_RSS_CONFIG_FLAG);
2902 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2903 VFPF_UPDATE_RSS_CAPS_FLAG);
2904 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2905 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2906 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2907 VFPF_UPDATE_RSS_KEY_FLAG);
2909 p_rss->rss_enable = p_rss_tlv->rss_enable;
2910 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2911 p_rss->rss_caps = p_rss_tlv->rss_caps;
2912 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2913 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2915 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2916 (1 << p_rss_tlv->rss_table_size_log));
2918 for (i = 0; i < table_size; i++) {
2919 struct qed_queue_cid *p_cid;
2921 q_idx = p_rss_tlv->rss_ind_table[i];
2922 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2923 QED_IOV_VALIDATE_Q_ENABLE)) {
2926 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2927 vf->relative_vf_id, q_idx);
2932 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
2933 p_rss->rss_ind_table[i] = p_cid;
2936 p_data->rss_params = p_rss;
2938 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2939 if (!b_reject)
2940 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
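/* RSS is the one extended TLV that is validated rather than blindly
 * applied: each indirection-table entry is translated from a
 * VF-relative queue id into a queue-cid pointer, and any invalid entry
 * sets b_reject, so the TLV shows up in tlvs_mask but is withheld from
 * tlvs_accepted.
 */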
2944 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2945 struct qed_vf_info *vf,
2946 struct qed_sp_vport_update_params *p_data,
2947 struct qed_sge_tpa_params *p_sge_tpa,
2948 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2950 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2951 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2953 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2954 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2956 if (!p_sge_tpa_tlv) {
2957 p_data->sge_tpa_params = NULL;
2961 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2963 p_sge_tpa->update_tpa_en_flg =
2964 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2965 p_sge_tpa->update_tpa_param_flg =
2966 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2967 VFPF_UPDATE_TPA_PARAM_FLAG);
2969 p_sge_tpa->tpa_ipv4_en_flg =
2970 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2971 p_sge_tpa->tpa_ipv6_en_flg =
2972 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2973 p_sge_tpa->tpa_pkt_split_flg =
2974 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2975 p_sge_tpa->tpa_hdr_data_split_flg =
2976 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2977 p_sge_tpa->tpa_gro_consistent_flg =
2978 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2980 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2981 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2982 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2983 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2984 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2986 p_data->sge_tpa_params = p_sge_tpa;
2988 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2991 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2993 struct qed_sp_vport_update_params *params,
2996 u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
2997 struct qed_filter_accept_flags *flags = &params->accept_flags;
2998 struct qed_public_vf_info *vf_info;
3001 tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
3002 BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);
3004 /* Untrusted VFs can't even be trusted to know that fact.
3005 * Simply indicate everything is configured fine, and trace
3006 * configuration 'behind their back'.
3008 if (!(*tlvs & tlv_mask))
3011 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3013 if (flags->update_rx_mode_config) {
3014 vf_info->rx_accept_mode = flags->rx_accept_filter;
3015 if (!vf_info->is_trusted_configured)
3016 flags->rx_accept_filter &= ~mask;
3019 if (flags->update_tx_mode_config) {
3020 vf_info->tx_accept_mode = flags->tx_accept_filter;
3021 if (!vf_info->is_trusted_configured)
3022 flags->tx_accept_filter &= ~mask;
3025 if (params->update_accept_any_vlan_flg) {
3026 vf_info->accept_any_vlan = params->accept_any_vlan;
3028 if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
3029 params->accept_any_vlan = false;
3035 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
3036 struct qed_ptt *p_ptt,
3037 struct qed_vf_info *vf)
3039 struct qed_rss_params *p_rss_params = NULL;
3040 struct qed_sp_vport_update_params params;
3041 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3042 struct qed_sge_tpa_params sge_tpa_params;
3043 u16 tlvs_mask = 0, tlvs_accepted = 0;
3044 u8 status = PFVF_STATUS_SUCCESS;
3048 /* Validate PF can send such a request */
3049 if (!vf->vport_instance) {
3052 "No VPORT instance available for VF[%d], failing vport update\n",
3054 status = PFVF_STATUS_FAILURE;
3057 p_rss_params = vzalloc(sizeof(*p_rss_params));
3058 if (!p_rss_params) {
3059 status = PFVF_STATUS_FAILURE;
3063 memset(&params, 0, sizeof(params));
3064 params.opaque_fid = vf->opaque_fid;
3065 params.vport_id = vf->vport_id;
3066 params.rss_params = NULL;
3068 /* Search for extended tlvs list and update values
3069 * from VF in struct qed_sp_vport_update_params.
3071 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3072 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3073 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3074 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3075 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3076 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3077 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3078 &sge_tpa_params, mbx, &tlvs_mask);
3080 tlvs_accepted = tlvs_mask;
3082 /* Some of the extended TLVs need to be validated first; In that case,
3083 * they can update the mask without updating the accepted [so that
3084 * PF could communicate to VF it has rejected request].
3086 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3087 mbx, &tlvs_mask, &tlvs_accepted);
3089 if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
3090 &params, &tlvs_accepted)) {
3092 status = PFVF_STATUS_NOT_SUPPORTED;
3096 if (!tlvs_accepted) {
3098 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3099 "Upper-layer prevents VF vport configuration\n");
3101 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3102 "No feature tlvs found for vport update\n");
3103 status = PFVF_STATUS_NOT_SUPPORTED;
3107 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3110 status = PFVF_STATUS_FAILURE;
3113 vfree(p_rss_params);
3114 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3115 tlvs_mask, tlvs_accepted);
3116 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3119 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
3120 struct qed_vf_info *p_vf,
3121 struct qed_filter_ucast *p_params)
3125 /* First remove entries and then add new ones */
3126 if (p_params->opcode == QED_FILTER_REMOVE) {
3127 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3128 if (p_vf->shadow_config.vlans[i].used &&
3129 p_vf->shadow_config.vlans[i].vid ==
3131 p_vf->shadow_config.vlans[i].used = false;
3134 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3137 "VF [%d] - Tried to remove a non-existent VLAN\n",
3138 p_vf->relative_vf_id);
3141 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3142 p_params->opcode == QED_FILTER_FLUSH) {
3143 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3144 p_vf->shadow_config.vlans[i].used = false;
3147 /* In forced mode, we're willing to remove entries - but we don't add
3150 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
3153 if (p_params->opcode == QED_FILTER_ADD ||
3154 p_params->opcode == QED_FILTER_REPLACE) {
3155 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3156 if (p_vf->shadow_config.vlans[i].used)
3159 p_vf->shadow_config.vlans[i].used = true;
3160 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3164 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3167 "VF [%d] - Tried to configure more than %d VLAN filters\n",
3168 p_vf->relative_vf_id,
3169 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
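/* The VLAN shadow above tracks what the VF believes is configured;
 * the PF presumably uses it to re-apply the VF's own filters once a
 * forced VLAN is removed, which is why forced mode still allows
 * removals but not additions.
 */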
3177 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
3178 struct qed_vf_info *p_vf,
3179 struct qed_filter_ucast *p_params)
3183 /* If we're in forced-mode, we don't allow any change */
3184 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
3187 /* First remove entries and then add new ones */
3188 if (p_params->opcode == QED_FILTER_REMOVE) {
3189 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3190 if (ether_addr_equal(p_vf->shadow_config.macs[i],
3192 eth_zero_addr(p_vf->shadow_config.macs[i]);
3197 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3198 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3199 "MAC isn't configured\n");
3202 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3203 p_params->opcode == QED_FILTER_FLUSH) {
3204 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
3205 eth_zero_addr(p_vf->shadow_config.macs[i]);
3208 /* List the new MAC address */
3209 if (p_params->opcode != QED_FILTER_ADD &&
3210 p_params->opcode != QED_FILTER_REPLACE)
3213 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3214 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
3215 ether_addr_copy(p_vf->shadow_config.macs[i],
3217 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3218 "Added MAC at %d entry in shadow\n", i);
3223 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3224 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3232 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3233 struct qed_vf_info *p_vf,
3234 struct qed_filter_ucast *p_params)
3238 if (p_params->type == QED_FILTER_MAC) {
3239 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3244 if (p_params->type == QED_FILTER_VLAN)
3245 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3250 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3251 int vfid, struct qed_filter_ucast *params)
3253 struct qed_public_vf_info *vf;
3255 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3259 /* No real decision to make; Store the configured MAC */
3260 if (params->type == QED_FILTER_MAC ||
3261 params->type == QED_FILTER_MAC_VLAN)
3262 ether_addr_copy(vf->mac, params->mac);
3267 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3268 struct qed_ptt *p_ptt,
3269 struct qed_vf_info *vf)
3271 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3272 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3273 struct vfpf_ucast_filter_tlv *req;
3274 u8 status = PFVF_STATUS_SUCCESS;
3275 struct qed_filter_ucast params;
3278 /* Prepare the unicast filter params */
3279 memset(&params, 0, sizeof(struct qed_filter_ucast));
3280 req = &mbx->req_virt->ucast_filter;
3281 params.opcode = (enum qed_filter_opcode)req->opcode;
3282 params.type = (enum qed_filter_ucast_type)req->type;
3284 params.is_rx_filter = 1;
3285 params.is_tx_filter = 1;
3286 params.vport_to_remove_from = vf->vport_id;
3287 params.vport_to_add_to = vf->vport_id;
3288 memcpy(params.mac, req->mac, ETH_ALEN);
3289 params.vlan = req->vlan;
3293 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3294 vf->abs_vf_id, params.opcode, params.type,
3295 params.is_rx_filter ? "RX" : "",
3296 params.is_tx_filter ? "TX" : "",
3297 params.vport_to_add_to,
3298 params.mac[0], params.mac[1],
3299 params.mac[2], params.mac[3],
3300 params.mac[4], params.mac[5], params.vlan);
3302 if (!vf->vport_instance) {
3305 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3307 status = PFVF_STATUS_FAILURE;
3311 /* Update shadow copy of the VF configuration */
3312 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3313 status = PFVF_STATUS_FAILURE;
3317 /* Determine if the unicast filtering is acceptable to the PF */
3318 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3319 (params.type == QED_FILTER_VLAN ||
3320 params.type == QED_FILTER_MAC_VLAN)) {
3321 /* Once VLAN is forced or PVID is set, do not allow
3322 * to add/replace any further VLANs.
3324 if (params.opcode == QED_FILTER_ADD ||
3325 params.opcode == QED_FILTER_REPLACE)
3326 status = PFVF_STATUS_FORCED;
3330 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3331 (params.type == QED_FILTER_MAC ||
3332 params.type == QED_FILTER_MAC_VLAN)) {
3333 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3334 (params.opcode != QED_FILTER_ADD &&
3335 params.opcode != QED_FILTER_REPLACE))
3336 status = PFVF_STATUS_FORCED;
3340 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3342 status = PFVF_STATUS_FAILURE;
3346 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3347 QED_SPQ_MODE_CB, NULL);
3349 status = PFVF_STATUS_FAILURE;
3352 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3353 sizeof(struct pfvf_def_resp_tlv), status);
3356 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3357 struct qed_ptt *p_ptt,
3358 struct qed_vf_info *vf)
3363 for (i = 0; i < vf->num_sbs; i++)
3364 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3365 vf->igu_sbs[i],
3366 vf->opaque_fid, false);
3368 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3369 sizeof(struct pfvf_def_resp_tlv),
3370 PFVF_STATUS_SUCCESS);
3373 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3374 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3376 u16 length = sizeof(struct pfvf_def_resp_tlv);
3377 u8 status = PFVF_STATUS_SUCCESS;
3379 /* Disable Interrupts for VF */
3380 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3382 /* Reset Permission table */
3383 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3385 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3389 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3390 struct qed_ptt *p_ptt,
3391 struct qed_vf_info *p_vf)
3393 u16 length = sizeof(struct pfvf_def_resp_tlv);
3394 u8 status = PFVF_STATUS_SUCCESS;
3397 qed_iov_vf_cleanup(p_hwfn, p_vf);
3399 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3400 /* Stopping the VF */
3401 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3402 p_vf->opaque_fid);
3403 if (rc) {
3405 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3407 status = PFVF_STATUS_FAILURE;
3410 p_vf->state = VF_STOPPED;
3413 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3417 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
3418 struct qed_ptt *p_ptt,
3419 struct qed_vf_info *p_vf)
3421 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3422 struct pfvf_read_coal_resp_tlv *p_resp;
3423 struct vfpf_read_coal_req_tlv *req;
3424 u8 status = PFVF_STATUS_FAILURE;
3425 struct qed_vf_queue *p_queue;
3426 struct qed_queue_cid *p_cid;
3427 u16 coal = 0, qid, i;
3431 mbx->offset = (u8 *)mbx->reply_virt;
3432 req = &mbx->req_virt->read_coal_req;
3435 b_is_rx = req->is_rx ? true : false;
3438 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
3439 QED_IOV_VALIDATE_Q_ENABLE)) {
3440 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3441 "VF[%d]: Invalid Rx queue_id = %d\n",
3442 p_vf->abs_vf_id, qid);
3446 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3447 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3451 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
3452 QED_IOV_VALIDATE_Q_ENABLE)) {
3453 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3454 "VF[%d]: Invalid Tx queue_id = %d\n",
3455 p_vf->abs_vf_id, qid);
3458 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3459 p_queue = &p_vf->vf_queues[qid];
3460 if ((!p_queue->cids[i].p_cid) ||
3461 (!p_queue->cids[i].b_is_tx))
3464 p_cid = p_queue->cids[i].p_cid;
3466 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3473 status = PFVF_STATUS_SUCCESS;
3476 p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
3478 p_resp->coal = coal;
3480 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
3481 sizeof(struct channel_list_end_tlv));
3483 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3486 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
3487 struct qed_ptt *p_ptt,
3488 struct qed_vf_info *vf)
3490 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3491 struct vfpf_update_coalesce *req;
3492 u8 status = PFVF_STATUS_FAILURE;
3493 struct qed_queue_cid *p_cid;
3494 u16 rx_coal, tx_coal;
3498 req = &mbx->req_virt->update_coalesce;
3500 rx_coal = req->rx_coal;
3501 tx_coal = req->tx_coal;
3504 if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
3505 QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
3506 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3507 "VF[%d]: Invalid Rx queue_id = %d\n",
3508 vf->abs_vf_id, qid);
3512 if (!qed_iov_validate_txq(p_hwfn, vf, qid,
3513 QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
3514 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3515 "VF[%d]: Invalid Tx queue_id = %d\n",
3516 vf->abs_vf_id, qid);
3522 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3523 vf->abs_vf_id, rx_coal, tx_coal, qid);
3526 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3528 rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3532 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3533 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3536 vf->rx_coal = rx_coal;
3540 struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
3542 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3543 if (!p_queue->cids[i].p_cid)
3546 if (!p_queue->cids[i].b_is_tx)
3549 rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3550 p_queue->cids[i].p_cid);
3555 "VF[%d]: Unable to set tx queue coalesce\n",
3560 vf->tx_coal = tx_coal;
3563 status = PFVF_STATUS_SUCCESS;
3565 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3566 sizeof(struct pfvf_def_resp_tlv), status);
3569 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3570 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3575 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3577 for (cnt = 0; cnt < 50; cnt++) {
3578 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3583 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3587 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3588 p_vf->abs_vf_id, val);
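/* The DORQ poll above runs while the PF pretends to be the VF
 * (qed_fid_pretend), so DORQ_REG_VF_USAGE_CNT reflects the VF's own
 * usage count; the pretend context is restored to the PF before the
 * result is checked.
 */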
3596 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3597 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3599 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3602 /* Read initial consumers & producers */
3603 for (i = 0; i < MAX_NUM_VOQS; i++) {
3606 cons[i] = qed_rd(p_hwfn, p_ptt,
3607 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3609 prod = qed_rd(p_hwfn, p_ptt,
3610 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3612 distance[i] = prod - cons[i];
3615 /* Wait for consumers to pass the producers */
3617 for (cnt = 0; cnt < 50; cnt++) {
3618 for (; i < MAX_NUM_VOQS; i++) {
3621 tmp = qed_rd(p_hwfn, p_ptt,
3622 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3624 if (distance[i] > tmp - cons[i])
3628 if (i == MAX_NUM_VOQS)
3635 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3636 p_vf->abs_vf_id, i);
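/* The PBF poll above snapshots a per-VOQ producer/consumer distance and
 * then waits until each consumer has advanced past its recorded
 * producer, i.e. until every block the VF had in flight has drained.
 */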
3643 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3644 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3648 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3652 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3660 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3661 struct qed_ptt *p_ptt,
3662 u16 rel_vf_id, u32 *ack_vfs)
3664 struct qed_vf_info *p_vf;
3667 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3671 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3672 (1ULL << (rel_vf_id % 64))) {
3673 u16 vfid = p_vf->abs_vf_id;
3675 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3676 "VF[%d] - Handling FLR\n", vfid);
3678 qed_iov_vf_cleanup(p_hwfn, p_vf);
3680 /* If VF isn't active, no need for anything but SW */
3684 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3688 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3690 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3694 /* Workaround to make VF-PF channel ready, as FW
3695 * doesn't do that as a part of FLR.
3698 GTT_BAR0_MAP_REG_USDM_RAM +
3699 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3701 /* VF_STOPPED has to be set only after final cleanup
3702 * but prior to re-enabling the VF.
3704 p_vf->state = VF_STOPPED;
3706 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3708 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3713 /* Mark VF for ack and clean pending state */
3714 if (p_vf->state == VF_RESET)
3715 p_vf->state = VF_STOPPED;
3716 ack_vfs[vfid / 32] |= BIT((vfid % 32));
3717 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3718 ~(1ULL << (rel_vf_id % 64));
3719 p_vf->vf_mbx.b_pending_msg = false;
3726 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3728 u32 ack_vfs[VF_MAX_STATIC / 32];
3732 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3734 /* Since BRB <-> PRS interface can't be tested as part of the flr
3735 * polling due to HW limitations, simply sleep a bit. And since
3736 * there's no need to wait per-vf, do it before looping.
3740 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3741 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3743 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3747 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3752 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3753 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3754 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3755 "[%08x,...,%08x]: %08x\n",
3756 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3758 if (!p_hwfn->cdev->p_iov_info) {
3759 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3764 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3765 struct qed_vf_info *p_vf;
3768 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3772 vfid = p_vf->abs_vf_id;
3773 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3774 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3775 u16 rel_vf_id = p_vf->relative_vf_id;
3777 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3778 "VF[%d] [rel %d] got FLR-ed\n",
3781 p_vf->state = VF_RESET;
3783 /* No need to lock here, since pending_flr should only change
3784 * here and before ACKing the MFW. Since the MFW will not
3785 * trigger an additional attention for VF FLR until the
3786 * previous one is ACKed, we're safe.
3787 */
3788 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
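/* Note the two bitmap layouts: pending_flr is a u64 bitmap indexed by
 * relative VF id, while the FLR ack sent to the MFW (ack_vfs) is a u32
 * bitmap indexed by absolute VF id - hence the different shifts in the
 * cleanup path above.
 */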
3796 static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
3798 struct qed_mcp_link_params *p_params,
3799 struct qed_mcp_link_state *p_link,
3800 struct qed_mcp_link_capabilities *p_caps)
3802 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3805 struct qed_bulletin_content *p_bulletin;
3810 p_bulletin = p_vf->bulletin.p_virt;
3813 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3815 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3817 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3821 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3822 struct qed_ptt *p_ptt, int vfid)
3824 struct qed_iov_vf_mbx *mbx;
3825 struct qed_vf_info *p_vf;
3827 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3831 mbx = &p_vf->vf_mbx;
3833 /* qed_iov_process_mbx_request */
3834 if (!mbx->b_pending_msg) {
3836 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3840 mbx->b_pending_msg = false;
3842 mbx->first_tlv = mbx->req_virt->first_tlv;
3844 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3845 "VF[%02x]: Processing mailbox message [type %04x]\n",
3846 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3848 /* check if tlv type is known */
3849 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3850 !p_vf->b_malicious) {
3851 switch (mbx->first_tlv.tl.type) {
3852 case CHANNEL_TLV_ACQUIRE:
3853 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3855 case CHANNEL_TLV_VPORT_START:
3856 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3858 case CHANNEL_TLV_VPORT_TEARDOWN:
3859 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3861 case CHANNEL_TLV_START_RXQ:
3862 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3864 case CHANNEL_TLV_START_TXQ:
3865 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3867 case CHANNEL_TLV_STOP_RXQS:
3868 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3870 case CHANNEL_TLV_STOP_TXQS:
3871 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3873 case CHANNEL_TLV_UPDATE_RXQ:
3874 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3876 case CHANNEL_TLV_VPORT_UPDATE:
3877 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3879 case CHANNEL_TLV_UCAST_FILTER:
3880 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3882 case CHANNEL_TLV_CLOSE:
3883 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3885 case CHANNEL_TLV_INT_CLEANUP:
3886 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3888 case CHANNEL_TLV_RELEASE:
3889 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3891 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3892 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3894 case CHANNEL_TLV_COALESCE_UPDATE:
3895 qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3897 case CHANNEL_TLV_COALESCE_READ:
3898 qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
3901 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3902 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3903 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3904 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3906 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3907 mbx->first_tlv.tl.type,
3908 sizeof(struct pfvf_def_resp_tlv),
3909 PFVF_STATUS_MALICIOUS);
3911 /* Unknown TLV - this may belong to a VF driver from the future -
3912 * a version written after this PF driver, supporting features we
3913 * don't yet know of and therefore can't honor. Or it may be that
3914 * a buggy VF driver is simply sending garbage over the channel.
3915 */
3918 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3920 mbx->first_tlv.tl.type,
3921 mbx->first_tlv.tl.length,
3922 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3924 /* Try replying in case the reply address matches the acquisition's
3925 * posted address.
3926 */
3927 if (p_vf->acquire.first_tlv.reply_address &&
3928 (mbx->first_tlv.reply_address ==
3929 p_vf->acquire.first_tlv.reply_address)) {
3930 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3931 mbx->first_tlv.tl.type,
3932 sizeof(struct pfvf_def_resp_tlv),
3933 PFVF_STATUS_NOT_SUPPORTED);
3937 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3943 void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
3947 memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3949 qed_for_each_vf(p_hwfn, i) {
3950 struct qed_vf_info *p_vf;
3952 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
3953 if (p_vf->vf_mbx.b_pending_msg)
3954 events[i / 64] |= 1ULL << (i % 64);
3958 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
3961 u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3963 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3966 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
3971 return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
3974 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
3975 u16 abs_vfid, struct regpair *vf_msg)
3977 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
3983 /* Record the physical address of the request so that the handler
3984 * can later copy the message from it.
3985 */
3986 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3988 /* Mark the event and schedule the workqueue */
3989 p_vf->vf_mbx.b_pending_msg = true;
3990 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
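/* The EQ handler only latches the request's address and flags the
 * mailbox; copying the message into the PF (qed_iov_copy_vf_msg) and
 * processing it happen later, from the IOV workqueue.
 */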
3995 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
3996 struct malicious_vf_eqe_data *p_data)
3998 struct qed_vf_info *p_vf;
4000 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4005 if (!p_vf->b_malicious) {
4007 "VF [%d] - Malicious behavior [%02x]\n",
4008 p_vf->abs_vf_id, p_data->err_id);
4010 p_vf->b_malicious = true;
4013 "VF [%d] - Malicious behavior [%02x]\n",
4014 p_vf->abs_vf_id, p_data->err_id);
4018 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
4021 union event_ring_data *data, u8 fw_return_code)
4024 case COMMON_EVENT_VF_PF_CHANNEL:
4025 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
4026 &data->vf_pf_channel.msg_addr);
4027 case COMMON_EVENT_MALICIOUS_VF:
4028 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4031 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
4037 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4039 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
4045 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4046 if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
4053 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
4056 struct qed_dmae_params params;
4057 struct qed_vf_info *vf_info;
4059 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4063 memset(&params, 0, sizeof(struct qed_dmae_params));
4064 params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
4065 params.src_vfid = vf_info->abs_vf_id;
4067 if (qed_dmae_host2host(p_hwfn, ptt,
4068 vf_info->vf_mbx.pending_req,
4069 vf_info->vf_mbx.req_phys,
4070 sizeof(union vfpf_tlvs) / 4, &params)) {
4071 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4072 "Failed to copy message from VF 0x%02x\n", vfid);
4080 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
4083 struct qed_vf_info *vf_info;
4086 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4088 DP_NOTICE(p_hwfn->cdev,
4089 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4093 if (vf_info->b_malicious) {
4094 DP_NOTICE(p_hwfn->cdev,
4095 "Can't set forced MAC to malicious VF [%d]\n", vfid);
4099 feature = 1 << MAC_ADDR_FORCED;
4100 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4102 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4103 /* Forced MAC will disable MAC_ADDR */
4104 vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
4106 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4109 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
4112 struct qed_vf_info *vf_info;
4115 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4117 DP_NOTICE(p_hwfn->cdev,
4118 "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
4122 if (vf_info->b_malicious) {
4123 DP_NOTICE(p_hwfn->cdev,
4124 "Can't set forced vlan to malicious VF [%d]\n", vfid);
4128 feature = 1 << VLAN_ADDR_FORCED;
4129 vf_info->bulletin.p_virt->pvid = pvid;
4131 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4133 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4135 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4138 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
4139 int vfid, u16 vxlan_port, u16 geneve_port)
4141 struct qed_vf_info *vf_info;
4143 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4145 DP_NOTICE(p_hwfn->cdev,
4146 "Can not set udp ports, invalid vfid [%d]\n", vfid);
4150 if (vf_info->b_malicious) {
4151 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4152 "Can not set udp ports to malicious VF [%d]\n",
4157 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4158 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4161 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
4163 struct qed_vf_info *p_vf_info;
4165 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4169 return !!p_vf_info->vport_instance;
4172 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
4174 struct qed_vf_info *p_vf_info;
4176 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4180 return p_vf_info->state == VF_STOPPED;
4183 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
4185 struct qed_vf_info *vf_info;
4187 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4191 return vf_info->spoof_chk;
4194 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
4196 struct qed_vf_info *vf;
4199 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4201 "SR-IOV sanity check failed, can't set spoofchk\n");
4205 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4209 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4210 /* After VF VPORT start PF will configure spoof check */
4211 vf->req_spoofchk_val = val;
4216 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
4222 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
4225 struct qed_vf_info *p_vf;
4227 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4228 if (!p_vf || !p_vf->bulletin.p_virt)
4231 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
4234 return p_vf->bulletin.p_virt->mac;
4238 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4240 struct qed_vf_info *p_vf;
4242 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4243 if (!p_vf || !p_vf->bulletin.p_virt)
4246 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
4249 return p_vf->bulletin.p_virt->pvid;
4252 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
4253 struct qed_ptt *p_ptt, int vfid, int val)
4255 struct qed_vf_info *vf;
4259 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4263 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4267 return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
4271 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
4273 struct qed_vf_info *vf;
4277 for_each_hwfn(cdev, i) {
4278 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4280 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4282 "SR-IOV sanity check failed, can't set min rate\n");
4287 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
4288 vport_id = vf->vport_id;
4290 return qed_configure_vport_wfq(cdev, vport_id, rate);
4293 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
4295 struct qed_wfq_data *vf_vp_wfq;
4296 struct qed_vf_info *vf_info;
4298 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4302 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4304 if (vf_vp_wfq->configured)
4305 return vf_vp_wfq->min_speed;
4311 * qed_schedule_iov - schedules IOV task for VF and PF
4312 * @hwfn: hardware function pointer
4313 * @flag: IOV flag for VF/PF
4315 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
4317 smp_mb__before_atomic();
4318 set_bit(flag, &hwfn->iov_task_flags);
4319 smp_mb__after_atomic();
4320 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
4321 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
4324 void qed_vf_start_iov_wq(struct qed_dev *cdev)
4328 for_each_hwfn(cdev, i)
4329 queue_delayed_work(cdev->hwfns[i].iov_wq,
4330 &cdev->hwfns[i].iov_task, 0);
4333 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
4337 for_each_hwfn(cdev, i)
4338 if (cdev->hwfns[i].iov_wq)
4339 flush_workqueue(cdev->hwfns[i].iov_wq);
4341 /* Mark VFs for disablement */
4342 qed_iov_set_vfs_to_disable(cdev, true);
4344 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
4345 pci_disable_sriov(cdev->pdev);
4347 for_each_hwfn(cdev, i) {
4348 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4349 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4351 /* Failure to acquire the ptt on 100g devices creates an odd error
4352 * where the first engine has already released IOV.
4353 */
4355 DP_ERR(hwfn, "Failed to acquire ptt\n");
4359 /* Clean WFQ db and configure equal weight for all vports */
4360 qed_clean_wfq_db(hwfn, ptt);
4362 qed_for_each_vf(hwfn, j) {
4365 if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
4368 /* Wait until VF is disabled before releasing */
4369 for (k = 0; k < 100; k++) {
4370 if (!qed_iov_is_vf_stopped(hwfn, j))
4377 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
4381 "Timeout waiting for VF's FLR to end\n");
4384 qed_ptt_release(hwfn, ptt);
4387 qed_iov_set_vfs_to_disable(cdev, false);
4392 static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4394 struct qed_iov_vf_init_params *params)
4398 /* Since we have an equal resource distribution per-VF, and we assume
4399 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
4400 * sequentially from there.
4402 base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
4404 params->rel_vf_id = vfid;
4405 for (i = 0; i < params->num_queues; i++) {
4406 params->req_rx_queue[i] = base + i;
4407 params->req_tx_queue[i] = base + i;
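/* A worked example of the mapping above: if the PF owns the first 16 L2
 * queues (FEAT_NUM(QED_PF_L2_QUE) == 16) and each VF is given 4 queues,
 * VF0 is assigned queues 16-19, VF1 queues 20-23, and so on.
 */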
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_iov_vf_init_params params;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, j, rc = 0;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		hwfn = &cdev->hwfns[j];
		ptt = qed_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
					  16);

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qed_sriov_enable_qid_config(hwfn, i, &params);
			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	hwfn = QED_LEADING_HWFN(cdev);
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_ERR(hwfn, "Failed to acquire ptt\n");
		rc = -EBUSY;
		goto err;
	}

	rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
	if (rc)
		DP_INFO(cdev, "Failed to update eswitch mode\n");
	qed_ptt_release(hwfn, ptt);

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}

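/* Dispatch an SR-IOV configuration request: a non-zero VF count enables
 * that many VFs, zero disables SR-IOV (this is the callback typically
 * reached through the PCI sysfs sriov_numvfs attribute).
 */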
static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}

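/* Record a PF-forced MAC for a VF and let the IOV worker propagate it to
 * the VF's bulletin board.
 */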
static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced MAC, and schedule the IOV task */
		ether_addr_copy(vf_info->forced_mac, mac);
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF VLAN; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;
	int ret;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
	if (ret)
		return ret;

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
	ivi->trusted = vf_info->is_trusted_request;

	return 0;
}

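/* Reflect the PF's link state into every VF bulletin, honoring each VF's
 * administratively-set link state and Tx rate.
 */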
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		/* Only hwfn0 is actually interested in the link speed.
		 * But since only it would receive an MFW indication of link,
		 * need to take configuration from it - otherwise things like
		 * rate limiting for hwfn1 VF would not work.
		 */
		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
		       sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to the maximum supported by HW:
			 * 40G for regular devices and 100G for CMT-mode
			 * devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
			break;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}

static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}

static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}

static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}

static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
			DP_NOTICE(hwfn,
				  "SR-IOV sanity check failed, can't set trust\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);

		if (vf->is_trusted_request == trust)
			return 0;
		vf->is_trusted_request = trust;

		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
	}

	return 0;
}

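/* Worker-context handling of pending VF mailbox requests: read the pending
 * events mask and process the mailbox of every VF with a message waiting.
 */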
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

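/* Worker-context propagation of PF-forced MAC/VLAN settings onto the VF
 * bulletin boards; schedules a bulletin post if anything changed.
 */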
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
			DP_VERBOSE(hwfn, QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with forced MAC */
			qed_iov_bulletin_set_forced_mac(hwfn,
							info->forced_mac, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn, QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan, i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
		qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}

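/* Worker-context handling of trust changes: rebuild the vport's accept
 * flags and anti-spoofing checks to match the requested trust level.
 */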
static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
	struct qed_sp_vport_update_params params;
	struct qed_filter_accept_flags *flags;
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 mask;
	int i;

	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	flags = &params.accept_flags;

	qed_for_each_vf(hwfn, i) {
		/* Need to make sure current requested configuration didn't
		 * flip so that we'll end up configuring something that's not
		 * needed.
		 */
		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (vf_info->is_trusted_configured ==
		    vf_info->is_trusted_request)
			continue;
		vf_info->is_trusted_configured = vf_info->is_trusted_request;

		/* Validate that the VF has a configured vport */
		vf = qed_iov_get_vf_info(hwfn, i, true);
		if (!vf->vport_instance)
			continue;

		memset(&params, 0, sizeof(params));
		params.opaque_fid = vf->opaque_fid;
		params.vport_id = vf->vport_id;
		params.update_ctl_frame_check = 1;
		params.mac_chk_en = !vf_info->is_trusted_configured;
		params.update_accept_any_vlan_flg = 0;

		if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
			params.update_accept_any_vlan_flg = 1;
			params.accept_any_vlan = vf_info->accept_any_vlan;
		}

		if (vf_info->rx_accept_mode & mask) {
			flags->update_rx_mode_config = 1;
			flags->rx_accept_filter = vf_info->rx_accept_mode;
		}

		if (vf_info->tx_accept_mode & mask) {
			flags->update_tx_mode_config = 1;
			flags->tx_accept_filter = vf_info->tx_accept_mode;
		}

		/* For an untrusted VF, strip the unmatched-accept bits;
		 * otherwise the mask bits set above would remain.
		 */
		if (!vf_info->is_trusted_configured) {
			flags->rx_accept_filter &= ~mask;
			flags->tx_accept_filter &= ~mask;
			params.accept_any_vlan = false;
		}

		if (flags->update_rx_mode_config ||
		    flags->update_tx_mode_config ||
		    params.update_ctl_frame_check ||
		    params.update_accept_any_vlan_flg) {
			DP_VERBOSE(hwfn, QED_MSG_IOV,
				   "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
				   vf_info->is_trusted_configured ? "trusted" : "untrusted",
				   vf->abs_vf_id, vf->relative_vf_id);
			qed_sp_vport_update(hwfn, &params,
					    QED_SPQ_MODE_EBLOCK, NULL);
		}
	}
}

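/* The PF's IOV worker: services whichever flags qed_schedule_iov() has set -
 * FLR cleanup, VF messages, unicast filters, bulletin posts and trust
 * changes.
 */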
static void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			return;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
		qed_iov_handle_trust_change(hwfn);
}

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(p_hwfn->cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}

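/* IOV ops exported to the protocol driver via qed_iov_if.h; the qede driver
 * wires these to its .ndo_set_vf_* and sriov-configure callbacks.
 */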
const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
	.set_trust = &qed_set_vf_trust,
};