/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
32 #ifndef __FSL_QBMAN_PORTAL_H
33 #define __FSL_QBMAN_PORTAL_H
35 #include "../../include/dpaa2-fd.h"
40 /* qbman software portal descriptor structure */
41 struct qbman_swp_desc {
42 void *cena_bar; /* Cache-enabled portal base address */
43 void *cinh_bar; /* Cache-inhibited portal base address */
/* Software portal interrupt status/trigger bits (one per portal ring) */
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20
54 /* the structure for pull dequeue descriptor */
55 struct qbman_pull_desc {
/* Dequeue command type for qbman_pull_desc_set_wq/channel() */
enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};
/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK      0x7f
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a
/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE	0x48
#define QBMAN_FQ_FORCE		0x49
#define QBMAN_FQ_XON		0x4d
#define QBMAN_FQ_XOFF		0x4e
94 /* structure of enqueue descriptor */
95 struct qbman_eq_desc {
112 /* buffer release descriptor */
113 struct qbman_release_desc {
/* Management command result codes */
#define QBMAN_MC_RSLT_OK      0xf0

/* Write-enable bits for qbman_swp_CDAN_set() */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4
127 /* portal data structure */
129 const struct qbman_swp_desc *desc;
130 void __iomem *addr_cena;
131 void __iomem *addr_cinh;
133 /* Management commands */
135 u32 valid_bit; /* 0x00 or 0x80 */
141 /* Volatile dequeues */
143 atomic_t available; /* indicates if a command can be sent */
144 u32 valid_bit; /* 0x00 or 0x80 */
145 struct dpaa2_dq *storage; /* NULL if DQRR */
153 int reset_bug; /* indicates dqrr reset workaround is needed */
157 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
158 void qbman_swp_finish(struct qbman_swp *p);
159 u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
160 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
161 u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
162 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
163 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
164 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
166 void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
167 void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
169 void qbman_pull_desc_clear(struct qbman_pull_desc *d);
170 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
171 struct dpaa2_dq *storage,
172 dma_addr_t storage_phys,
174 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
175 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
176 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
177 enum qbman_pull_type_e dct);
178 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
179 enum qbman_pull_type_e dct);
181 int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
183 const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
184 void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
186 int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
188 void qbman_eq_desc_clear(struct qbman_eq_desc *d);
189 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
190 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
191 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
192 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
193 u32 qd_bin, u32 qd_prio);
195 int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
196 const struct dpaa2_fd *fd);
198 void qbman_release_desc_clear(struct qbman_release_desc *d);
199 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
200 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
202 int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
203 const u64 *buffers, unsigned int num_buffers);
204 int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
205 unsigned int num_buffers);
206 int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
208 int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
209 u8 we_mask, u8 cdan_en,
212 void *qbman_swp_mc_start(struct qbman_swp *p);
213 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
214 void *qbman_swp_mc_result(struct qbman_swp *p);
217 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
218 * @dq: the dequeue result to be checked
220 * DQRR entries may contain non-dequeue results, ie. notifications
222 static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
224 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
/**
 * qbman_result_is_SCN() - Check the dequeue result is notification or not
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return !qbman_result_is_DQ(dq);
}
237 /* FQ Data Availability */
238 static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
240 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
243 /* Channel Data Availability */
244 static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
246 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
249 /* Congestion State Change */
250 static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
252 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
255 /* Buffer Pool State Change */
256 static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
258 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
261 /* Congestion Group Count Update */
262 static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
264 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
268 static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
270 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
273 /* Retirement Immediate */
274 static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
276 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
280 static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
282 return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
286 * qbman_result_SCN_state() - Get the state field in State-change notification
288 static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
290 return scn->scn.state;
293 #define SCN_RID_MASK 0x00FFFFFF
296 * qbman_result_SCN_rid() - Get the resource id in State-change notification
298 static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
300 return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
304 * qbman_result_SCN_ctx() - Get the context data in State-change notification
306 static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
308 return le64_to_cpu(scn->scn.ctx);
312 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
313 * @s: the software portal object
314 * @fqid: the index of frame queue to be scheduled
316 * There are a couple of different ways that a FQ can end up parked state,
319 * Return 0 for success, or negative error code for failure.
321 static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
323 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
327 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
328 * @s: the software portal object
329 * @fqid: the index of frame queue to be forced
331 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
332 * and thus be available for selection by any channel-dequeuing behaviour (push
333 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
334 * empty at the time this happens, the resulting dq_entry will have no FD.
335 * (qbman_result_DQ_fd() will return NULL.)
337 * Return 0 for success, or negative error code for failure.
339 static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
341 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
345 * qbman_swp_fq_xon() - sets FQ flow-control to XON
346 * @s: the software portal object
347 * @fqid: the index of frame queue
349 * This setting doesn't affect enqueues to the FQ, just dequeues.
351 * Return 0 for success, or negative error code for failure.
353 static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
355 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
359 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
360 * @s: the software portal object
361 * @fqid: the index of frame queue
363 * This setting doesn't affect enqueues to the FQ, just dequeues.
364 * XOFF FQs will remain in the tenatively-scheduled state, even when
365 * non-empty, meaning they won't be selected for scheduled dequeuing.
366 * If a FQ is changed to XOFF after it had already become truly-scheduled
367 * to a channel, and a pull dequeue of that channel occurs that selects
368 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
369 * (qbman_result_DQ_fd() will return NULL.)
371 * Return 0 for success, or negative error code for failure.
373 static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
375 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */
392 * qbman_swp_CDAN_set_context() - Set CDAN context
393 * @s: the software portal object
394 * @channelid: the channel index
395 * @ctx: the context to be set in CDAN
397 * Return 0 for success, or negative error code for failure.
399 static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
402 return qbman_swp_CDAN_set(s, channelid,
408 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
409 * @s: the software portal object
410 * @channelid: the index of the channel to generate CDAN
412 * Return 0 for success, or negative error code for failure.
414 static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
416 return qbman_swp_CDAN_set(s, channelid,
422 * qbman_swp_CDAN_disable() - disable CDAN for the channel
423 * @s: the software portal object
424 * @channelid: the index of the channel to generate CDAN
426 * Return 0 for success, or negative error code for failure.
428 static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
430 return qbman_swp_CDAN_set(s, channelid,
436 * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN
437 * @s: the software portal object
438 * @channelid: the index of the channel to generate CDAN
439 * @ctx:i the context set in CDAN
441 * Return 0 for success, or negative error code for failure.
443 static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
447 return qbman_swp_CDAN_set(s, channelid,
448 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
452 /* Wraps up submit + poll-for-result */
453 static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
458 qbman_swp_mc_submit(swp, cmd, cmd_verb);
461 cmd = qbman_swp_mc_result(swp);
462 } while (!cmd && loopvar--);
469 #endif /* __FSL_QBMAN_PORTAL_H */