2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mlx5/driver.h>
34 #include <linux/mlx5/device.h>
35 #include <linux/mlx5/mlx5_ifc.h>
39 #include "mlx5_core.h"
/*
 * No-op update_root_ft for flow-table types with no firmware backing
 * (wired into mlx5_flow_cmd_stubs).
 * NOTE(review): parameter list and body are truncated in this view;
 * presumably returns 0 unconditionally — confirm against full source.
 */
42 static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
43 struct mlx5_flow_table *ft,
/*
 * Stub create_flow_table: no firmware command is issued.
 * NOTE(review): body truncated here; presumably returns 0 and leaves
 * *table_id untouched or zeroed — confirm against full source.
 */
50 static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
52 enum fs_flow_table_op_mod op_mod,
53 enum fs_flow_table_type type,
55 unsigned int log_size,
56 struct mlx5_flow_table *next_ft,
57 unsigned int *table_id, u32 flags)
/* Stub destroy_flow_table: no-op counterpart of the stub create. */
62 static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
63 struct mlx5_flow_table *ft)
/* Stub modify_flow_table: no-op; next_ft chaining is not programmed. */
68 static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
69 struct mlx5_flow_table *ft,
70 struct mlx5_flow_table *next_ft)
/*
 * Stub create_flow_group: no firmware command issued.
 * NOTE(review): the match-criteria input parameter is not visible in
 * this truncated view.
 */
75 static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
76 struct mlx5_flow_table *ft,
78 unsigned int *group_id)
/* Stub destroy_flow_group: no-op counterpart of the stub group create. */
83 static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
84 struct mlx5_flow_table *ft,
85 unsigned int group_id)
/* Stub create_fte: the flow-table entry is not written to firmware. */
90 static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
91 struct mlx5_flow_table *ft,
92 struct mlx5_flow_group *group,
/* Stub update_fte: no-op; the FTE modification never reaches firmware. */
98 static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
99 struct mlx5_flow_table *ft,
100 unsigned int group_id,
/* Stub delete_fte: no-op counterpart of the stub FTE create/update. */
107 static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
108 struct mlx5_flow_table *ft,
/*
 * Program firmware's root flow table via SET_FLOW_TABLE_ROOT.
 * Stack-allocated in/out buffers are zero-initialized with {0} so unset
 * fields default to 0.
 * NOTE(review): several lines (an IB-port early-out condition, the
 * branch selecting op_mod, and the guards around underlay_qpn/vport)
 * are missing from this truncated view — presumably op_mod=1/table_id=0
 * disconnects the root table while op_mod=0 connects ft->id; confirm
 * against the full source.
 */
114 static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
115 struct mlx5_flow_table *ft, u32 underlay_qpn,
118 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
119 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
121 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
125 MLX5_SET(set_flow_table_root_in, in, opcode,
126 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
127 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
/* op_mod=1 with table_id 0: presumably the "disconnect root" path. */
130 MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
131 MLX5_SET(set_flow_table_root_in, in, table_id, 0);
/* op_mod=0: connect ft as the new root table. */
133 MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
134 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
137 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
/* other_vport=1 targets ft->vport rather than the issuing function. */
139 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
140 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
143 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/*
 * Issue CREATE_FLOW_TABLE to firmware and return the new table id via
 * *table_id. Encap/decap is enabled iff MLX5_FLOW_TABLE_TUNNEL_EN is
 * set in flags. op_mod selects normal vs LAG-demux table setup.
 * NOTE(review): this view is truncated — the switch statement around the
 * op_mod cases, the next_ft NULL checks, brace lines and the final
 * return are not visible; confirm against the full source.
 */
146 static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
148 enum fs_flow_table_op_mod op_mod,
149 enum fs_flow_table_type type,
151 unsigned int log_size,
152 struct mlx5_flow_table *next_ft,
153 unsigned int *table_id, u32 flags)
/* !! normalizes the flag test to 0/1 for the *_en context bits below. */
155 int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
156 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
157 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
160 MLX5_SET(create_flow_table_in, in, opcode,
161 MLX5_CMD_OP_CREATE_FLOW_TABLE);
163 MLX5_SET(create_flow_table_in, in, table_type, type);
164 MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
165 MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
/* other_vport=1: create the table on behalf of another vport. */
167 MLX5_SET(create_flow_table_in, in, vport_number, vport);
168 MLX5_SET(create_flow_table_in, in, other_vport, 1);
171 MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
173 MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
177 case FS_FT_OP_MOD_NORMAL:
/* table_miss_action=1: misses forward to next_ft instead of default. */
179 MLX5_SET(create_flow_table_in, in,
180 flow_table_context.table_miss_action, 1);
181 MLX5_SET(create_flow_table_in, in,
182 flow_table_context.table_miss_id, next_ft->id);
186 case FS_FT_OP_MOD_LAG_DEMUX:
187 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
189 MLX5_SET(create_flow_table_in, in,
190 flow_table_context.lag_master_next_table_id,
195 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/* On success, firmware returns the allocated table id in `out`. */
197 *table_id = MLX5_GET(create_flow_table_out, out,
/*
 * Issue DESTROY_FLOW_TABLE for ft. Table type/id come from the ft
 * object; other_vport addressing mirrors the create path.
 * NOTE(review): the vport-guard condition and braces are not visible in
 * this truncated view.
 */
202 static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
203 struct mlx5_flow_table *ft)
205 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
206 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
208 MLX5_SET(destroy_flow_table_in, in, opcode,
209 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
210 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
211 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
213 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
214 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
217 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/*
 * Issue MODIFY_FLOW_TABLE to re-chain ft to next_ft. For LAG-demux
 * tables the lag_master_next_table_id field is updated; otherwise the
 * miss-table id / miss-action is updated. A NULL next_ft clears the
 * chaining (next-table id 0 / miss action 0).
 * NOTE(review): the if/else braces, the next_ft NULL tests and the
 * non-LAG vport guard are missing from this truncated view — confirm
 * branch structure against the full source.
 */
220 static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
221 struct mlx5_flow_table *ft,
222 struct mlx5_flow_table *next_ft)
224 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
225 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
227 MLX5_SET(modify_flow_table_in, in, opcode,
228 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
229 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
230 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
/* LAG demux: only the lag next-table field is selected for modify. */
232 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
233 MLX5_SET(modify_flow_table_in, in, modify_field_select,
234 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
236 MLX5_SET(modify_flow_table_in, in,
237 flow_table_context.lag_master_next_table_id, next_ft->id);
/* next_ft == NULL: clear the lag next-table id. */
239 MLX5_SET(modify_flow_table_in, in,
240 flow_table_context.lag_master_next_table_id, 0);
244 MLX5_SET(modify_flow_table_in, in, vport_number,
246 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
/* Normal tables: select and update the miss-table destination. */
248 MLX5_SET(modify_flow_table_in, in, modify_field_select,
249 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
251 MLX5_SET(modify_flow_table_in, in,
252 flow_table_context.table_miss_action, 1);
253 MLX5_SET(modify_flow_table_in, in,
254 flow_table_context.table_miss_id,
/* next_ft == NULL: revert to the default miss action. */
257 MLX5_SET(modify_flow_table_in, in,
258 flow_table_context.table_miss_action, 0);
262 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/*
 * Issue CREATE_FLOW_GROUP and return the firmware group id via
 * *group_id. The caller-supplied `in` buffer carries the match criteria
 * (its declaration/parameter is not visible in this truncated view).
 */
265 static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
266 struct mlx5_flow_table *ft,
268 unsigned int *group_id)
270 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
/* inlen in bytes: the group-create input is larger than a u32 array. */
271 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
274 MLX5_SET(create_flow_group_in, in, opcode,
275 MLX5_CMD_OP_CREATE_FLOW_GROUP);
276 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
277 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
279 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
280 MLX5_SET(create_flow_group_in, in, other_vport, 1);
283 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
/* On success firmware returns the allocated group id in `out`. */
285 *group_id = MLX5_GET(create_flow_group_out, out,
/*
 * Issue DESTROY_FLOW_GROUP for group_id within ft; other_vport
 * addressing mirrors the create path (guard condition not visible in
 * this truncated view).
 */
290 static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
291 struct mlx5_flow_table *ft,
292 unsigned int group_id)
294 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
295 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
297 MLX5_SET(destroy_flow_group_in, in, opcode,
298 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
299 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
300 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
301 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
303 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
304 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
307 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/*
 * Build and execute SET_FLOW_TABLE_ENTRY: writes one flow-table entry
 * (match value, action bits, flow tag, encap/modify-header ids, pushed
 * VLANs, and the forward/counter destination lists) into firmware.
 * The input is heap-allocated (kvzalloc) because its size grows with
 * the number of destinations.
 * NOTE(review): this view is heavily truncated — the group_id/fte
 * parameters, NULL-check after kvzalloc, several guard conditions,
 * switch braces, kvfree and the return path are not visible; branch
 * structure below is inferred and should be confirmed against the
 * full source.
 */
310 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
311 int opmod, int modify_mask,
312 struct mlx5_flow_table *ft,
/* Input length scales with dests_size: one dest_format_struct each. */
316 unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
317 fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
318 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
319 struct mlx5_flow_rule *dst;
320 void *in_flow_context, *vlan;
321 void *in_match_value;
326 in = kvzalloc(inlen, GFP_KERNEL);
330 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
331 MLX5_SET(set_fte_in, in, op_mod, opmod);
332 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
333 MLX5_SET(set_fte_in, in, table_type, ft->type);
334 MLX5_SET(set_fte_in, in, table_id, ft->id);
335 MLX5_SET(set_fte_in, in, flow_index, fte->index);
337 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
338 MLX5_SET(set_fte_in, in, other_vport, 1);
/* Fill the embedded flow_context from the software FTE. */
341 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
342 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
344 MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
345 MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
346 MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
347 MLX5_SET(flow_context, in_flow_context, modify_header_id,
348 fte->action.modify_id);
/* Two push-VLAN slots: vlan[0] -> push_vlan, vlan[1] -> push_vlan_2. */
350 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
352 MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
353 MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
354 MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
356 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
358 MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
359 MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
360 MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
/* Raw copy of the match value into the context. */
362 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
364 memcpy(in_match_value, &fte->val, sizeof(fte->val));
366 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
/* Forward destinations: counters are skipped here, handled below. */
367 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
370 list_for_each_entry(dst, &fte->node.children, node.list) {
371 unsigned int id, type = dst->dest_attr.type;
373 if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
/* FLOW_TABLE_NUM is normalized to FLOW_TABLE with a raw table num. */
377 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
378 id = dst->dest_attr.ft_num;
379 type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
381 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
382 id = dst->dest_attr.ft->id;
384 case MLX5_FLOW_DESTINATION_TYPE_VPORT:
385 id = dst->dest_attr.vport.num;
/* Cross-eswitch vport: carry the owner vhca id when valid. */
386 MLX5_SET(dest_format_struct, in_dests,
387 destination_eswitch_owner_vhca_id_valid,
388 dst->dest_attr.vport.vhca_id_valid);
389 MLX5_SET(dest_format_struct, in_dests,
390 destination_eswitch_owner_vhca_id,
391 dst->dest_attr.vport.vhca_id);
394 id = dst->dest_attr.tir_num;
397 MLX5_SET(dest_format_struct, in_dests, destination_type,
399 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
/* Advance to the next destination slot in the list. */
400 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
404 MLX5_SET(flow_context, in_flow_context, destination_list_size,
/* Counter destinations: capped by log_max_flow_counter capability. */
408 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
409 int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
410 log_max_flow_counter,
414 list_for_each_entry(dst, &fte->node.children, node.list) {
415 if (dst->dest_attr.type !=
416 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
419 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
420 dst->dest_attr.counter->id);
421 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
424 if (list_size > max_list_size) {
429 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
433 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
/*
 * create_fte op: thin wrapper that delegates to mlx5_cmd_set_fte with
 * opmod=0 and modify_mask=0 (i.e. a fresh entry, nothing to modify).
 */
439 static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
440 struct mlx5_flow_table *ft,
441 struct mlx5_flow_group *group,
444 unsigned int group_id = group->id;
446 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
/*
 * update_fte op: delegates to mlx5_cmd_set_fte with a modify opmod and
 * mask. NOTE(review): the lines deriving `opmod`/`modify_mask` from the
 * flow_modify_en capability read below are not visible in this
 * truncated view — confirm against the full source.
 */
449 static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
450 struct mlx5_flow_table *ft,
451 unsigned int group_id,
/* Capability: does the NIC receive table support atomic FTE modify? */
456 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
457 flow_table_properties_nic_receive.
463 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
/*
 * Issue DELETE_FLOW_TABLE_ENTRY for the entry at fte->index in ft;
 * other_vport addressing mirrors the set path (guard condition not
 * visible in this truncated view).
 */
466 static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
467 struct mlx5_flow_table *ft,
470 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
471 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
473 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
474 MLX5_SET(delete_fte_in, in, table_type, ft->type);
475 MLX5_SET(delete_fte_in, in, table_id, ft->id);
476 MLX5_SET(delete_fte_in, in, flow_index, fte->index);
478 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
479 MLX5_SET(delete_fte_in, in, other_vport, 1);
482 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/*
 * Allocate one firmware flow counter (ALLOC_FLOW_COUNTER) and return
 * its id via *id. NOTE(review): the error-check between exec and the
 * MLX5_GET, and the final return, are not visible in this truncated
 * view.
 */
485 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
487 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
488 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
491 MLX5_SET(alloc_flow_counter_in, in, opcode,
492 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
494 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
496 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
/* Release flow counter `id` back to firmware (DEALLOC_FLOW_COUNTER). */
500 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
502 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
503 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
505 MLX5_SET(dealloc_flow_counter_in, in, opcode,
506 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
507 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
508 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/*
 * Query a single flow counter: QUERY_FLOW_COUNTER with op_mod 0, then
 * extract 64-bit packet and octet counts from the embedded
 * traffic_counter. The out buffer is sized in bytes (not dwords) to
 * append one traffic_counter after the base output struct.
 * NOTE(review): the error check after exec and final return are not
 * visible in this truncated view.
 */
511 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
512 u64 *packets, u64 *bytes)
514 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
515 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
516 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
520 MLX5_SET(query_flow_counter_in, in, opcode,
521 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
522 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
523 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
524 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
528 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
529 *packets = MLX5_GET64(traffic_counter, stats, packets);
530 *bytes = MLX5_GET64(traffic_counter, stats, octets);
534 struct mlx5_cmd_fc_bulk {
/*
 * Allocate a bulk-query context for `num` counters starting at `id`.
 * The context and its output buffer are a single kzalloc'd block
 * (outlen = base output struct + one traffic_counter per counter).
 * NOTE(review): the NULL check and field initialization after kzalloc
 * are not visible in this truncated view.
 */
541 struct mlx5_cmd_fc_bulk *
542 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
544 struct mlx5_cmd_fc_bulk *b;
546 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
547 MLX5_ST_SZ_BYTES(traffic_counter) * num;
549 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
560 void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
/*
 * Execute the bulk QUERY_FLOW_COUNTER: asks firmware for b->num
 * consecutive counters starting at b->id, results land in b->out.
 */
566 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
568 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
570 MLX5_SET(query_flow_counter_in, in, opcode,
571 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
572 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
573 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
574 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
575 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
/*
 * Extract one counter's packet/octet totals from a completed bulk
 * query. `id` is translated to an index relative to the bulk base id;
 * out-of-range ids are warned about and ignored (packets/bytes left
 * unmodified on that path).
 */
578 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
579 struct mlx5_cmd_fc_bulk *b, u32 id,
580 u64 *packets, u64 *bytes)
582 int index = id - b->id;
585 if (index < 0 || index >= b->num) {
586 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
587 id, b->id, b->id + b->num - 1);
/* Index into the flow_statistics array of the bulk output buffer. */
591 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
592 flow_statistics[index]);
593 *packets = MLX5_GET64(traffic_counter, stats, packets);
594 *bytes = MLX5_GET64(traffic_counter, stats, octets);
/*
 * Allocate a firmware encap header (ALLOC_ENCAP_HEADER): validates
 * `size` against the max_encap_header_size ESW capability, builds a
 * variable-length input (base struct + header bytes) on the heap,
 * copies the caller's header in, and returns the firmware id via
 * *encap_id.
 * NOTE(review): several parameters (size, header_type, encap_header,
 * encap_id), the kzalloc NULL check, error paths and kfree are not
 * visible in this truncated view — confirm against the full source.
 */
597 int mlx5_encap_alloc(struct mlx5_core_dev *dev,
603 int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
604 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
605 void *encap_header_in;
/* Reject headers larger than the device capability advertises. */
611 if (size > max_encap_size) {
612 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
613 size, max_encap_size);
617 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
/* Locate the nested encap_header struct and its payload area. */
622 encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
623 header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
/* Actual command length: up to and including the copied header bytes. */
624 inlen = header - (void *)in + size;
626 memset(in, 0, inlen);
627 MLX5_SET(alloc_encap_header_in, in, opcode,
628 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
629 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
630 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
631 memcpy(header, encap_header, size);
633 memset(out, 0, sizeof(out));
634 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
636 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
/*
 * Release encap header `encap_id` (DEALLOC_ENCAP_HEADER). The command
 * status is deliberately ignored — this is a void cleanup path.
 */
641 void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
643 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
644 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
646 memset(in, 0, sizeof(in));
647 MLX5_SET(dealloc_encap_header_in, in, opcode,
648 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
649 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
651 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/*
 * Allocate a firmware modify-header context
 * (ALLOC_MODIFY_HEADER_CONTEXT): maps the flow namespace to a table
 * type, checks num_actions against the per-table
 * max_modify_header_actions capability, copies the caller's action
 * array into a variable-length input, and returns the context id via
 * *modify_header_id.
 * NOTE(review): the switch statement around the namespace cases, the
 * default case, kzalloc NULL check, kfree and return are not visible
 * in this truncated view — confirm against the full source.
 */
654 int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
655 u8 namespace, u8 num_actions,
656 void *modify_actions, u32 *modify_header_id)
658 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
659 int max_actions, actions_size, inlen, err;
665 case MLX5_FLOW_NAMESPACE_FDB:
666 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
667 table_type = FS_FT_FDB;
669 case MLX5_FLOW_NAMESPACE_KERNEL:
670 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
671 table_type = FS_FT_NIC_RX;
/* Reject requests exceeding the device's per-table action limit. */
677 if (num_actions > max_actions) {
678 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
679 num_actions, max_actions);
/* Input length: base struct + one action union per requested action. */
683 actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
684 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
686 in = kzalloc(inlen, GFP_KERNEL);
690 MLX5_SET(alloc_modify_header_context_in, in, opcode,
691 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
692 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
693 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
695 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
696 memcpy(actions_in, modify_actions, actions_size);
698 memset(out, 0, sizeof(out));
699 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
701 *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
/*
 * Release modify-header context `modify_header_id`
 * (DEALLOC_MODIFY_HEADER_CONTEXT). Command status is ignored — void
 * cleanup path, matching mlx5_encap_dealloc.
 */
706 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
708 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
709 u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
711 memset(in, 0, sizeof(in));
712 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
713 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
714 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
717 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
/* Firmware-backed command table: real CMD_OP_* implementations above. */
720 static const struct mlx5_flow_cmds mlx5_flow_cmds = {
721 .create_flow_table = mlx5_cmd_create_flow_table,
722 .destroy_flow_table = mlx5_cmd_destroy_flow_table,
723 .modify_flow_table = mlx5_cmd_modify_flow_table,
724 .create_flow_group = mlx5_cmd_create_flow_group,
725 .destroy_flow_group = mlx5_cmd_destroy_flow_group,
726 .create_fte = mlx5_cmd_create_fte,
727 .update_fte = mlx5_cmd_update_fte,
728 .delete_fte = mlx5_cmd_delete_fte,
729 .update_root_ft = mlx5_cmd_update_root_ft,
/* No-op command table: stub implementations that touch no firmware. */
732 static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
733 .create_flow_table = mlx5_cmd_stub_create_flow_table,
734 .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
735 .modify_flow_table = mlx5_cmd_stub_modify_flow_table,
736 .create_flow_group = mlx5_cmd_stub_create_flow_group,
737 .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
738 .create_fte = mlx5_cmd_stub_create_fte,
739 .update_fte = mlx5_cmd_stub_update_fte,
740 .delete_fte = mlx5_cmd_stub_delete_fte,
741 .update_root_ft = mlx5_cmd_stub_update_root_ft,
/* Accessor for the firmware-backed command table. */
744 static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
746 return &mlx5_flow_cmds;
/* Accessor for the no-op stub command table. */
749 static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
751 return &mlx5_flow_cmd_stubs;
/*
 * Select the command table for a flow-table type: the listed ACL,
 * sniffer (and other visible) types get the firmware-backed commands;
 * anything else falls back to the stubs.
 * NOTE(review): the switch keyword and some case labels are missing
 * from this truncated view — the full case list should be confirmed
 * against the full source.
 */
754 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
758 case FS_FT_ESW_EGRESS_ACL:
759 case FS_FT_ESW_INGRESS_ACL:
761 case FS_FT_SNIFFER_RX:
762 case FS_FT_SNIFFER_TX:
763 return mlx5_fs_cmd_get_fw_cmds();
766 return mlx5_fs_cmd_get_stub_cmds();