1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
3 */
4
5 #include "ice_common.h"
6 #include "ice_flex_pipe.h"
7 #include "ice_protocol_type.h"
8 #include "ice_flow.h"
9
10 /* For supporting double VLAN mode, it is necessary to enable or disable certain
11  * boost TCAM entries. The metadata label names that match the following
12 * prefixes will be saved to allow enabling double VLAN mode.
13 */
14 #define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
15 #define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
16
17 /* To support tunneling entries by PF, the package will append the PF number to
18 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
19 */
20 #define ICE_TNL_PRE "TNL_"
21 static const struct ice_tunnel_type_scan tnls[] = {
22 { TNL_VXLAN, "TNL_VXLAN_PF" },
23 { TNL_GENEVE, "TNL_GENEVE_PF" },
24 { TNL_ECPRI, "TNL_UDP_ECPRI_PF" },
25 { TNL_LAST, "" }
26 };
27
28 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
29 /* SWITCH */
30 {
31 ICE_SID_XLT0_SW,
32 ICE_SID_XLT_KEY_BUILDER_SW,
33 ICE_SID_XLT1_SW,
34 ICE_SID_XLT2_SW,
35 ICE_SID_PROFID_TCAM_SW,
36 ICE_SID_PROFID_REDIR_SW,
37 ICE_SID_FLD_VEC_SW,
38 ICE_SID_CDID_KEY_BUILDER_SW,
39 ICE_SID_CDID_REDIR_SW
40 },
41
42 /* ACL */
43 {
44 ICE_SID_XLT0_ACL,
45 ICE_SID_XLT_KEY_BUILDER_ACL,
46 ICE_SID_XLT1_ACL,
47 ICE_SID_XLT2_ACL,
48 ICE_SID_PROFID_TCAM_ACL,
49 ICE_SID_PROFID_REDIR_ACL,
50 ICE_SID_FLD_VEC_ACL,
51 ICE_SID_CDID_KEY_BUILDER_ACL,
52 ICE_SID_CDID_REDIR_ACL
53 },
54
55 /* FD */
56 {
57 ICE_SID_XLT0_FD,
58 ICE_SID_XLT_KEY_BUILDER_FD,
59 ICE_SID_XLT1_FD,
60 ICE_SID_XLT2_FD,
61 ICE_SID_PROFID_TCAM_FD,
62 ICE_SID_PROFID_REDIR_FD,
63 ICE_SID_FLD_VEC_FD,
64 ICE_SID_CDID_KEY_BUILDER_FD,
65 ICE_SID_CDID_REDIR_FD
66 },
67
68 /* RSS */
69 {
70 ICE_SID_XLT0_RSS,
71 ICE_SID_XLT_KEY_BUILDER_RSS,
72 ICE_SID_XLT1_RSS,
73 ICE_SID_XLT2_RSS,
74 ICE_SID_PROFID_TCAM_RSS,
75 ICE_SID_PROFID_REDIR_RSS,
76 ICE_SID_FLD_VEC_RSS,
77 ICE_SID_CDID_KEY_BUILDER_RSS,
78 ICE_SID_CDID_REDIR_RSS
79 },
80
81 /* PE */
82 {
83 ICE_SID_XLT0_PE,
84 ICE_SID_XLT_KEY_BUILDER_PE,
85 ICE_SID_XLT1_PE,
86 ICE_SID_XLT2_PE,
87 ICE_SID_PROFID_TCAM_PE,
88 ICE_SID_PROFID_REDIR_PE,
89 ICE_SID_FLD_VEC_PE,
90 ICE_SID_CDID_KEY_BUILDER_PE,
91 ICE_SID_CDID_REDIR_PE
92 }
93 };
94
95 /**
96 * ice_sect_id - returns section ID
97 * @blk: block type
98 * @sect: section type
99 *
100 * This helper function returns the proper section ID given a block type and a
101 * section type.
102 */
103 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
104 {
105 return ice_sect_lkup[blk][sect];
106 }
107
108 /**
109 * ice_pkg_val_buf
110 * @buf: pointer to the ice buffer
111 *
112 * This helper function validates a buffer's header.
113 */
114 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
115 {
116 struct ice_buf_hdr *hdr;
117 u16 section_count;
118 u16 data_end;
119
120 hdr = (struct ice_buf_hdr *)buf->buf;
121 /* verify data */
122 section_count = LE16_TO_CPU(hdr->section_count);
123 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
124 return NULL;
125
126 data_end = LE16_TO_CPU(hdr->data_end);
127 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
128 return NULL;
129
130 return hdr;
131 }
132
133 /**
134 * ice_find_buf_table
135 * @ice_seg: pointer to the ice segment
136 *
137 * Returns the address of the buffer table within the ice segment.
138 */
139 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
140 {
141 struct ice_nvm_table *nvms;
142
143 nvms = (struct ice_nvm_table *)
144 (ice_seg->device_table +
145 LE32_TO_CPU(ice_seg->device_table_count));
146
147 return (_FORCE_ struct ice_buf_table *)
148 (nvms->vers + LE32_TO_CPU(nvms->table_count));
149 }
150
151 /**
152 * ice_pkg_enum_buf
153 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
154 * @state: pointer to the enum state
155 *
156 * This function will enumerate all the buffers in the ice segment. The first
157 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
158 * ice_seg is set to NULL which continues the enumeration. When the function
159  * returns a NULL pointer, the end of the buffers has been reached, or an
160 * unexpected value has been detected (for example an invalid section count or
161 * an invalid buffer end value).
162 */
163 static struct ice_buf_hdr *
164 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
165 {
166 if (ice_seg) {
167 state->buf_table = ice_find_buf_table(ice_seg);
168 if (!state->buf_table)
169 return NULL;
170
171 state->buf_idx = 0;
172 return ice_pkg_val_buf(state->buf_table->buf_array);
173 }
174
175 if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
176 return ice_pkg_val_buf(state->buf_table->buf_array +
177 state->buf_idx);
178 else
179 return NULL;
180 }
181
182 /**
183 * ice_pkg_advance_sect
184 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
185 * @state: pointer to the enum state
186 *
187 * This helper function will advance the section within the ice segment,
188 * also advancing the buffer if needed.
189 */
190 static bool
191 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
192 {
193 if (!ice_seg && !state->buf)
194 return false;
195
196 if (!ice_seg && state->buf)
197 if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
198 return true;
199
200 state->buf = ice_pkg_enum_buf(ice_seg, state);
201 if (!state->buf)
202 return false;
203
204 /* start of new buffer, reset section index */
205 state->sect_idx = 0;
206 return true;
207 }
208
209 /**
210 * ice_pkg_enum_section
211 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
212 * @state: pointer to the enum state
213 * @sect_type: section type to enumerate
214 *
215 * This function will enumerate all the sections of a particular type in the
216 * ice segment. The first call is made with the ice_seg parameter non-NULL;
217 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
218 * When the function returns a NULL pointer, then the end of the matching
219 * sections has been reached.
220 */
221 void *
222 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
223 u32 sect_type)
224 {
225 u16 offset, size;
226
227 if (ice_seg)
228 state->type = sect_type;
229
230 if (!ice_pkg_advance_sect(ice_seg, state))
231 return NULL;
232
233 /* scan for next matching section */
234 while (state->buf->section_entry[state->sect_idx].type !=
235 CPU_TO_LE32(state->type))
236 if (!ice_pkg_advance_sect(NULL, state))
237 return NULL;
238
239 /* validate section */
240 offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
241 if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
242 return NULL;
243
244 size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
245 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
246 return NULL;
247
248 /* make sure the section fits in the buffer */
249 if (offset + size > ICE_PKG_BUF_SIZE)
250 return NULL;
251
252 state->sect_type =
253 LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
254
255 /* calc pointer to this section */
256 state->sect = ((u8 *)state->buf) +
257 LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
258
259 return state->sect;
260 }
261
262 /**
263 * ice_pkg_enum_entry
264 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
265 * @state: pointer to the enum state
266 * @sect_type: section type to enumerate
267 * @offset: pointer to variable that receives the offset in the table (optional)
268 * @handler: function that handles access to the entries into the section type
269 *
270  * This function will enumerate all the entries in a particular section type in
271 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
272 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
273 * When the function returns a NULL pointer, then the end of the entries has
274 * been reached.
275 *
276 * Since each section may have a different header and entry size, the handler
277  * function is needed to determine the number and location of entries in each
278 * section.
279 *
280 * The offset parameter is optional, but should be used for sections that
281 * contain an offset for each section table. For such cases, the section handler
282  * function must return the appropriate offset + index to give the absolute
283 * offset for each entry. For example, if the base for a section's header
284 * indicates a base offset of 10, and the index for the entry is 2, then
285  * the section handler function should set the offset to 10 + 2 = 12.
286 */
287 void *
288 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
289 u32 sect_type, u32 *offset,
290 void *(*handler)(u32 sect_type, void *section,
291 u32 index, u32 *offset))
292 {
293 void *entry;
294
295 if (ice_seg) {
296 if (!handler)
297 return NULL;
298
299 if (!ice_pkg_enum_section(ice_seg, state, sect_type))
300 return NULL;
301
302 state->entry_idx = 0;
303 state->handler = handler;
304 } else {
305 state->entry_idx++;
306 }
307
308 if (!state->handler)
309 return NULL;
310
311 /* get entry */
312 entry = state->handler(state->sect_type, state->sect, state->entry_idx,
313 offset);
314 if (!entry) {
315 /* end of a section, look for another section of this type */
316 if (!ice_pkg_enum_section(NULL, state, 0))
317 return NULL;
318
319 state->entry_idx = 0;
320 entry = state->handler(state->sect_type, state->sect,
321 state->entry_idx, offset);
322 }
323
324 return entry;
325 }
326
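/* Usage sketch (illustrative only, not part of the driver): the enumeration
 * helpers above follow a first-call/continue-call pattern. A caller walking
 * every boost TCAM entry of a valid 'ice_seg' could look like:
 *
 *	struct ice_boost_tcam_entry *tcam;
 *	struct ice_pkg_enum state;
 *
 *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 *	tcam = (struct ice_boost_tcam_entry *)
 *		ice_pkg_enum_entry(ice_seg, &state,
 *				   ICE_SID_RXPARSER_BOOST_TCAM, NULL,
 *				   ice_boost_tcam_handler);
 *	while (tcam) {
 *		(use tcam here)
 *		tcam = (struct ice_boost_tcam_entry *)
 *			ice_pkg_enum_entry(NULL, &state,
 *					   ICE_SID_RXPARSER_BOOST_TCAM, NULL,
 *					   ice_boost_tcam_handler);
 *	}
 *
 * ice_find_boost_entry() below uses exactly this pattern.
 */
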
327 /**
328 * ice_hw_ptype_ena - check if the PTYPE is enabled or not
329 * @hw: pointer to the HW structure
330 * @ptype: the hardware PTYPE
331 */
332 bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
333 {
334 return ptype < ICE_FLOW_PTYPE_MAX &&
335 ice_is_bit_set(hw->hw_ptype, ptype);
336 }
337
338 /**
339 * ice_marker_ptype_tcam_handler
340 * @sect_type: section type
341 * @section: pointer to section
342 * @index: index of the Marker PType TCAM entry to be returned
343 * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
344 *
345 * This is a callback function that can be passed to ice_pkg_enum_entry.
346 * Handles enumeration of individual Marker PType TCAM entries.
347 */
348 static void *
349 ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
350 u32 *offset)
351 {
352 struct ice_marker_ptype_tcam_section *marker_ptype;
353
354 if (!section)
355 return NULL;
356
357 if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
358 return NULL;
359
360 if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
361 return NULL;
362
363 if (offset)
364 *offset = 0;
365
366 marker_ptype = (struct ice_marker_ptype_tcam_section *)section;
367 if (index >= LE16_TO_CPU(marker_ptype->count))
368 return NULL;
369
370 return marker_ptype->tcam + index;
371 }
372
373 /**
374 * ice_fill_hw_ptype - fill the enabled PTYPE bit information
375 * @hw: pointer to the HW structure
376 */
377 static void
378 ice_fill_hw_ptype(struct ice_hw *hw)
379 {
380 struct ice_marker_ptype_tcam_entry *tcam;
381 struct ice_seg *seg = hw->seg;
382 struct ice_pkg_enum state;
383
384 ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
385 if (!seg)
386 return;
387
388 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
389
390 do {
391 tcam = (struct ice_marker_ptype_tcam_entry *)
392 ice_pkg_enum_entry(seg, &state,
393 ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
394 ice_marker_ptype_tcam_handler);
395 if (tcam &&
396 LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
397 LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
398 ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype);
399
400 seg = NULL;
401 } while (tcam);
402 }
403
404 /**
405 * ice_boost_tcam_handler
406 * @sect_type: section type
407 * @section: pointer to section
408 * @index: index of the boost TCAM entry to be returned
409 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
410 *
411 * This is a callback function that can be passed to ice_pkg_enum_entry.
412 * Handles enumeration of individual boost TCAM entries.
413 */
414 static void *
415 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
416 {
417 struct ice_boost_tcam_section *boost;
418
419 if (!section)
420 return NULL;
421
422 if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
423 return NULL;
424
425 if (index > ICE_MAX_BST_TCAMS_IN_BUF)
426 return NULL;
427
428 if (offset)
429 *offset = 0;
430
431 boost = (struct ice_boost_tcam_section *)section;
432 if (index >= LE16_TO_CPU(boost->count))
433 return NULL;
434
435 return boost->tcam + index;
436 }
437
438 /**
439 * ice_find_boost_entry
440 * @ice_seg: pointer to the ice segment (non-NULL)
441 * @addr: Boost TCAM address of entry to search for
442 * @entry: returns pointer to the entry
443 *
444 * Finds a particular Boost TCAM entry and returns a pointer to that entry
445 * if it is found. The ice_seg parameter must not be NULL since the first call
446 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
447 */
448 static enum ice_status
449 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
450 struct ice_boost_tcam_entry **entry)
451 {
452 struct ice_boost_tcam_entry *tcam;
453 struct ice_pkg_enum state;
454
455 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
456
457 if (!ice_seg)
458 return ICE_ERR_PARAM;
459
460 do {
461 tcam = (struct ice_boost_tcam_entry *)
462 ice_pkg_enum_entry(ice_seg, &state,
463 ICE_SID_RXPARSER_BOOST_TCAM, NULL,
464 ice_boost_tcam_handler);
465 if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
466 *entry = tcam;
467 return ICE_SUCCESS;
468 }
469
470 ice_seg = NULL;
471 } while (tcam);
472
473 *entry = NULL;
474 return ICE_ERR_CFG;
475 }
476
477 /**
478 * ice_label_enum_handler
479 * @sect_type: section type
480 * @section: pointer to section
481 * @index: index of the label entry to be returned
482 * @offset: pointer to receive absolute offset, always zero for label sections
483 *
484 * This is a callback function that can be passed to ice_pkg_enum_entry.
485 * Handles enumeration of individual label entries.
486 */
487 static void *
488 ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
489 u32 *offset)
490 {
491 struct ice_label_section *labels;
492
493 if (!section)
494 return NULL;
495
496 if (index > ICE_MAX_LABELS_IN_BUF)
497 return NULL;
498
499 if (offset)
500 *offset = 0;
501
502 labels = (struct ice_label_section *)section;
503 if (index >= LE16_TO_CPU(labels->count))
504 return NULL;
505
506 return labels->label + index;
507 }
508
509 /**
510 * ice_enum_labels
511 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
512 * @type: the section type that will contain the label (0 on subsequent calls)
513 * @state: ice_pkg_enum structure that will hold the state of the enumeration
514 * @value: pointer to a value that will return the label's value if found
515 *
516 * Enumerates a list of labels in the package. The caller will call
517 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
518  * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
519  * the end of the list has been reached.
520 */
521 static char *
522 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
523 u16 *value)
524 {
525 struct ice_label *label;
526
527 /* Check for valid label section on first call */
528 if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
529 return NULL;
530
531 label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
532 NULL,
533 ice_label_enum_handler);
534 if (!label)
535 return NULL;
536
537 *value = LE16_TO_CPU(label->value);
538 return label->name;
539 }
540
541 /**
542 * ice_add_tunnel_hint
543 * @hw: pointer to the HW structure
544 * @label_name: label text
545 * @val: value of the tunnel port boost entry
546 */
547 static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
548 {
549 if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
550 u16 i;
551
552 for (i = 0; tnls[i].type != TNL_LAST; i++) {
553 size_t len = strlen(tnls[i].label_prefix);
554
555 /* Look for matching label start, before continuing */
556 if (strncmp(label_name, tnls[i].label_prefix, len))
557 continue;
558
559 /* Make sure this label matches our PF. Note that the PF
560 * character ('0' - '7') will be located where our
561 * prefix string's null terminator is located.
562 */
563 if ((label_name[len] - '0') == hw->pf_id) {
564 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
565 hw->tnl.tbl[hw->tnl.count].valid = false;
566 hw->tnl.tbl[hw->tnl.count].in_use = false;
567 hw->tnl.tbl[hw->tnl.count].marked = false;
568 hw->tnl.tbl[hw->tnl.count].boost_addr = val;
569 hw->tnl.tbl[hw->tnl.count].port = 0;
570 hw->tnl.count++;
571 break;
572 }
573 }
574 }
575 }
576
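/* Worked example (illustrative): on PF 2, the label "TNL_VXLAN_PF2" matches
 * the "TNL_VXLAN_PF" prefix of tnls[0] and label_name[len] == '2' equals
 * hw->pf_id, so a TNL_VXLAN entry with boost address 'val' is appended to
 * hw->tnl.tbl. On the same PF, "TNL_VXLAN_PF0" would be skipped.
 */
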
577 /**
578 * ice_add_dvm_hint
579 * @hw: pointer to the HW structure
580 * @val: value of the boost entry
581 * @enable: true if entry needs to be enabled, or false if needs to be disabled
582 */
583 static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
584 {
585 if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
586 hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
587 hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
588 hw->dvm_upd.count++;
589 }
590 }
591
592 /**
593 * ice_init_pkg_hints
594 * @hw: pointer to the HW structure
595 * @ice_seg: pointer to the segment of the package scan (non-NULL)
596 *
597 * This function will scan the package and save off relevant information
598 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
599 * since the first call to ice_enum_labels requires a pointer to an actual
600 * ice_seg structure.
601 */
602 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
603 {
604 struct ice_pkg_enum state;
605 char *label_name;
606 u16 val;
607 int i;
608
609 ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
610 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
611
612 if (!ice_seg)
613 return;
614
615 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
616 &val);
617
618 while (label_name) {
619 if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
620 /* check for a tunnel entry */
621 ice_add_tunnel_hint(hw, label_name, val);
622
623 /* check for a dvm mode entry */
624 else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
625 ice_add_dvm_hint(hw, val, true);
626
627 /* check for a svm mode entry */
628 else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
629 ice_add_dvm_hint(hw, val, false);
630
631 label_name = ice_enum_labels(NULL, 0, &state, &val);
632 }
633
634 /* Cache the appropriate boost TCAM entry pointers for tunnels */
635 for (i = 0; i < hw->tnl.count; i++) {
636 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
637 &hw->tnl.tbl[i].boost_entry);
638 if (hw->tnl.tbl[i].boost_entry)
639 hw->tnl.tbl[i].valid = true;
640 }
641
642 /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
643 for (i = 0; i < hw->dvm_upd.count; i++)
644 ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
645 &hw->dvm_upd.tbl[i].boost_entry);
646 }
647
648 /* Key creation */
649
650 #define ICE_DC_KEY 0x1 /* don't care */
651 #define ICE_DC_KEYINV 0x1
652 #define ICE_NM_KEY 0x0 /* never match */
653 #define ICE_NM_KEYINV 0x0
654 #define ICE_0_KEY 0x1 /* match 0 */
655 #define ICE_0_KEYINV 0x0
656 #define ICE_1_KEY 0x0 /* match 1 */
657 #define ICE_1_KEYINV 0x1
658
659 /**
660 * ice_gen_key_word - generate 16-bits of a key/mask word
661 * @val: the value
662 * @valid: valid bits mask (change only the valid bits)
663 * @dont_care: don't care mask
664 * @nvr_mtch: never match mask
665  * @key: pointer to where the resulting key portion will be stored
666  * @key_inv: pointer to where the resulting key invert portion will be stored
667 *
668  * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask
669  * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits
670 * of key and 8 bits of key invert.
671 *
672 * '0' = b01, always match a 0 bit
673 * '1' = b10, always match a 1 bit
674 * '?' = b11, don't care bit (always matches)
675 * '~' = b00, never match bit
676 *
677 * Input:
678 * val: b0 1 0 1 0 1
679 * dont_care: b0 0 1 1 0 0
680 * never_mtch: b0 0 0 0 1 1
681 * ------------------------------
682 * Result: key: b01 10 11 11 00 00
683 */
684 static enum ice_status
685 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
686 u8 *key_inv)
687 {
688 u8 in_key = *key, in_key_inv = *key_inv;
689 u8 i;
690
691 /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
692 if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
693 return ICE_ERR_CFG;
694
695 *key = 0;
696 *key_inv = 0;
697
698 /* encode the 8 bits into 8-bit key and 8-bit key invert */
699 for (i = 0; i < 8; i++) {
700 *key >>= 1;
701 *key_inv >>= 1;
702
703 if (!(valid & 0x1)) { /* change only valid bits */
704 *key |= (in_key & 0x1) << 7;
705 *key_inv |= (in_key_inv & 0x1) << 7;
706 } else if (dont_care & 0x1) { /* don't care bit */
707 *key |= ICE_DC_KEY << 7;
708 *key_inv |= ICE_DC_KEYINV << 7;
709 } else if (nvr_mtch & 0x1) { /* never match bit */
710 *key |= ICE_NM_KEY << 7;
711 *key_inv |= ICE_NM_KEYINV << 7;
712 } else if (val & 0x01) { /* exact 1 match */
713 *key |= ICE_1_KEY << 7;
714 *key_inv |= ICE_1_KEYINV << 7;
715 } else { /* exact 0 match */
716 *key |= ICE_0_KEY << 7;
717 *key_inv |= ICE_0_KEYINV << 7;
718 }
719
720 dont_care >>= 1;
721 nvr_mtch >>= 1;
722 valid >>= 1;
723 val >>= 1;
724 in_key >>= 1;
725 in_key_inv >>= 1;
726 }
727
728 return ICE_SUCCESS;
729 }
730
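/* Worked example (illustrative): calling ice_gen_key_word() with val = 0x5A,
 * valid = 0xFF, dont_care = 0x0C and nvr_mtch = 0x03 yields *key = 0xAC and
 * *key_inv = 0x5C: bits 0-1 encode as never match (key/key_inv both 0),
 * bits 2-3 as don't care (both 1), and bits 4-7 encode the exact value bits
 * of val (key gets the inverted bit, key_inv gets the bit itself).
 */
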
731 /**
732 * ice_bits_max_set - determine if the number of bits set is within a maximum
733 * @mask: pointer to the byte array which is the mask
734 * @size: the number of bytes in the mask
735 * @max: the max number of set bits
736 *
737  * This function determines whether there are at most 'max' bits set in an
738  * array. Returns true if the number of bits set is <= max, false
739  * otherwise.
740 */
741 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
742 {
743 u16 count = 0;
744 u16 i;
745
746 /* check each byte */
747 for (i = 0; i < size; i++) {
748 /* if 0, go to next byte */
749 if (!mask[i])
750 continue;
751
752 /* We know there is at least one set bit in this byte because of
753 * the above check; if we already have found 'max' number of
754 * bits set, then we can return failure now.
755 */
756 if (count == max)
757 return false;
758
759 /* count the bits in this byte, checking threshold */
760 count += ice_hweight8(mask[i]);
761 if (count > max)
762 return false;
763 }
764
765 return true;
766 }
767
768 /**
769 * ice_set_key - generate a variable sized key with multiples of 16-bits
770 * @key: pointer to where the key will be stored
771 * @size: the size of the complete key in bytes (must be even)
772 * @val: array of 8-bit values that makes up the value portion of the key
773 * @upd: array of 8-bit masks that determine what key portion to update
774 * @dc: array of 8-bit masks that make up the don't care mask
775 * @nm: array of 8-bit masks that make up the never match mask
776 * @off: the offset of the first byte in the key to update
777 * @len: the number of bytes in the key update
778 *
779 * This function generates a key from a value, a don't care mask and a never
780 * match mask.
781 * upd, dc, and nm are optional parameters, and can be NULL:
782 * upd == NULL --> upd mask is all 1's (update all bits)
783 * dc == NULL --> dc mask is all 0's (no don't care bits)
784 * nm == NULL --> nm mask is all 0's (no never match bits)
785 */
786 enum ice_status
787 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
788 u16 len)
789 {
790 u16 half_size;
791 u16 i;
792
793 /* size must be a multiple of 2 bytes. */
794 if (size % 2)
795 return ICE_ERR_CFG;
796 half_size = size / 2;
797
798 if (off + len > half_size)
799 return ICE_ERR_CFG;
800
801 /* Make sure at most one bit is set in the never match mask. Having more
802 * than one never match mask bit set will cause HW to consume excessive
803 * power otherwise; this is a power management efficiency check.
804 */
805 #define ICE_NVR_MTCH_BITS_MAX 1
806 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
807 return ICE_ERR_CFG;
808
809 for (i = 0; i < len; i++)
810 if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
811 dc ? dc[i] : 0, nm ? nm[i] : 0,
812 key + off + i, key + half_size + off + i))
813 return ICE_ERR_CFG;
814
815 return ICE_SUCCESS;
816 }
817
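/* Usage sketch (illustrative; the local variables are hypothetical): to
 * program the first two bytes of a 16-byte key (8 key bytes followed by
 * 8 key-invert bytes) so byte 0 must match 0x11 exactly and byte 1 is
 * entirely don't care:
 *
 *	u8 tcam_key[16] = { 0 };
 *	u8 val[2] = { 0x11, 0x00 };
 *	u8 dc[2] = { 0x00, 0xff };
 *
 *	if (ice_set_key(tcam_key, sizeof(tcam_key), val, NULL, dc, NULL,
 *			0, sizeof(val)))
 *		(handle ICE_ERR_CFG)
 *
 * upd == NULL updates all bits, and nm == NULL means no never match bits,
 * so the never-match power check does not reject the key.
 */
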
818 /**
819 * ice_acquire_global_cfg_lock
820 * @hw: pointer to the HW structure
821 * @access: access type (read or write)
822 *
823 * This function will request ownership of the global config lock for reading
824 * or writing of the package. When attempting to obtain write access, the
825 * caller must check for the following two return values:
826 *
827 * ICE_SUCCESS - Means the caller has acquired the global config lock
828 * and can perform writing of the package.
829 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
830 * package or has found that no update was necessary; in
831 * this case, the caller can just skip performing any
832 * update of the package.
833 */
834 static enum ice_status
835 ice_acquire_global_cfg_lock(struct ice_hw *hw,
836 enum ice_aq_res_access_type access)
837 {
838 enum ice_status status;
839
840 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
841
842 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
843 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
844
845 if (status == ICE_ERR_AQ_NO_WORK)
846 ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
847
848 return status;
849 }
850
851 /**
852 * ice_release_global_cfg_lock
853 * @hw: pointer to the HW structure
854 *
855 * This function will release the global config lock.
856 */
857 static void ice_release_global_cfg_lock(struct ice_hw *hw)
858 {
859 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
860 }
861
862 /**
863 * ice_acquire_change_lock
864 * @hw: pointer to the HW structure
865 * @access: access type (read or write)
866 *
867 * This function will request ownership of the change lock.
868 */
869 enum ice_status
870 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
871 {
872 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
873
874 return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
875 ICE_CHANGE_LOCK_TIMEOUT);
876 }
877
878 /**
879 * ice_release_change_lock
880 * @hw: pointer to the HW structure
881 *
882 * This function will release the change lock using the proper Admin Command.
883 */
884 void ice_release_change_lock(struct ice_hw *hw)
885 {
886 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
887
888 ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
889 }
890
891 /**
892 * ice_aq_download_pkg
893 * @hw: pointer to the hardware structure
894 * @pkg_buf: the package buffer to transfer
895 * @buf_size: the size of the package buffer
896 * @last_buf: last buffer indicator
897 * @error_offset: returns error offset
898 * @error_info: returns error information
899 * @cd: pointer to command details structure or NULL
900 *
901 * Download Package (0x0C40)
902 */
903 static enum ice_status
904 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
905 u16 buf_size, bool last_buf, u32 *error_offset,
906 u32 *error_info, struct ice_sq_cd *cd)
907 {
908 struct ice_aqc_download_pkg *cmd;
909 struct ice_aq_desc desc;
910 enum ice_status status;
911
912 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
913
914 if (error_offset)
915 *error_offset = 0;
916 if (error_info)
917 *error_info = 0;
918
919 cmd = &desc.params.download_pkg;
920 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
921 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
922
923 if (last_buf)
924 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
925
926 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
927 if (status == ICE_ERR_AQ_ERROR) {
928 /* Read error from buffer only when the FW returned an error */
929 struct ice_aqc_download_pkg_resp *resp;
930
931 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
932 if (error_offset)
933 *error_offset = LE32_TO_CPU(resp->error_offset);
934 if (error_info)
935 *error_info = LE32_TO_CPU(resp->error_info);
936 }
937
938 return status;
939 }
940
941 /**
942 * ice_aq_upload_section
943 * @hw: pointer to the hardware structure
944 * @pkg_buf: the package buffer which will receive the section
945 * @buf_size: the size of the package buffer
946 * @cd: pointer to command details structure or NULL
947 *
948 * Upload Section (0x0C41)
949 */
950 enum ice_status
951 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
952 u16 buf_size, struct ice_sq_cd *cd)
953 {
954 struct ice_aq_desc desc;
955
956 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
957 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
958 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
959
960 return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
961 }
962
963 /**
964 * ice_aq_update_pkg
965 * @hw: pointer to the hardware structure
966 * @pkg_buf: the package cmd buffer
967 * @buf_size: the size of the package cmd buffer
968 * @last_buf: last buffer indicator
969 * @error_offset: returns error offset
970 * @error_info: returns error information
971 * @cd: pointer to command details structure or NULL
972 *
973 * Update Package (0x0C42)
974 */
975 static enum ice_status
976 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
977 bool last_buf, u32 *error_offset, u32 *error_info,
978 struct ice_sq_cd *cd)
979 {
980 struct ice_aqc_download_pkg *cmd;
981 struct ice_aq_desc desc;
982 enum ice_status status;
983
984 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
985
986 if (error_offset)
987 *error_offset = 0;
988 if (error_info)
989 *error_info = 0;
990
991 cmd = &desc.params.download_pkg;
992 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
993 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
994
995 if (last_buf)
996 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
997
998 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
999 if (status == ICE_ERR_AQ_ERROR) {
1000 /* Read error from buffer only when the FW returned an error */
1001 struct ice_aqc_download_pkg_resp *resp;
1002
1003 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
1004 if (error_offset)
1005 *error_offset = LE32_TO_CPU(resp->error_offset);
1006 if (error_info)
1007 *error_info = LE32_TO_CPU(resp->error_info);
1008 }
1009
1010 return status;
1011 }
1012
1013 /**
1014 * ice_find_seg_in_pkg
1015 * @hw: pointer to the hardware structure
1016  * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
1017 * @pkg_hdr: pointer to the package header to be searched
1018 *
1019 * This function searches a package file for a particular segment type. On
1020 * success it returns a pointer to the segment header, otherwise it will
1021 * return NULL.
1022 */
1023 static struct ice_generic_seg_hdr *
1024 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
1025 struct ice_pkg_hdr *pkg_hdr)
1026 {
1027 u32 i;
1028
1029 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1030 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
1031 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
1032 pkg_hdr->pkg_format_ver.update,
1033 pkg_hdr->pkg_format_ver.draft);
1034
1035 /* Search all package segments for the requested segment type */
1036 for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
1037 struct ice_generic_seg_hdr *seg;
1038
1039 seg = (struct ice_generic_seg_hdr *)
1040 ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
1041
1042 if (LE32_TO_CPU(seg->seg_type) == seg_type)
1043 return seg;
1044 }
1045
1046 return NULL;
1047 }
1048
1049 /**
1050 * ice_update_pkg_no_lock
1051 * @hw: pointer to the hardware structure
1052 * @bufs: pointer to an array of buffers
1053 * @count: the number of buffers in the array
1054 */
1055 static enum ice_status
1056 ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1057 {
1058 enum ice_status status = ICE_SUCCESS;
1059 u32 i;
1060
1061 for (i = 0; i < count; i++) {
1062 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
1063 bool last = ((i + 1) == count);
1064 u32 offset, info;
1065
1066 status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
1067 last, &offset, &info, NULL);
1068
1069 if (status) {
1070 ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
1071 status, offset, info);
1072 break;
1073 }
1074 }
1075
1076 return status;
1077 }
1078
1079 /**
1080 * ice_update_pkg
1081 * @hw: pointer to the hardware structure
1082 * @bufs: pointer to an array of buffers
1083 * @count: the number of buffers in the array
1084 *
1085 * Obtains change lock and updates package.
1086 */
1087 enum ice_status
1088 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1089 {
1090 enum ice_status status;
1091
1092 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
1093 if (status)
1094 return status;
1095
1096 status = ice_update_pkg_no_lock(hw, bufs, count);
1097
1098 ice_release_change_lock(hw);
1099
1100 return status;
1101 }
1102
1103 /**
1104 * ice_dwnld_cfg_bufs
1105 * @hw: pointer to the hardware structure
1106 * @bufs: pointer to an array of buffers
1107 * @count: the number of buffers in the array
1108 *
1109 * Obtains global config lock and downloads the package configuration buffers
1110 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
1111 * found indicates that the rest of the buffers are all metadata buffers.
1112 */
1113 static enum ice_status
1114 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1115 {
1116 enum ice_status status;
1117 struct ice_buf_hdr *bh;
1118 u32 offset, info, i;
1119
1120 if (!bufs || !count)
1121 return ICE_ERR_PARAM;
1122
1123 /* If the first buffer's first section has its metadata bit set
1124 * then there are no buffers to be downloaded, and the operation is
1125 * considered a success.
1126 */
1127 bh = (struct ice_buf_hdr *)bufs;
1128 if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
1129 return ICE_SUCCESS;
1130
1131 /* reset pkg_dwnld_status in case this function is called in the
1132 * reset/rebuild flow
1133 */
1134 hw->pkg_dwnld_status = ICE_AQ_RC_OK;
1135
1136 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
1137 if (status) {
1138 if (status == ICE_ERR_AQ_NO_WORK)
1139 hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
1140 else
1141 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
1142 return status;
1143 }
1144
1145 for (i = 0; i < count; i++) {
1146 bool last = ((i + 1) == count);
1147
1148 if (!last) {
1149 /* check next buffer for metadata flag */
1150 bh = (struct ice_buf_hdr *)(bufs + i + 1);
1151
1152 /* A set metadata flag in the next buffer will signal
1153 * that the current buffer will be the last buffer
1154 * downloaded
1155 */
1156 if (LE16_TO_CPU(bh->section_count))
1157 if (LE32_TO_CPU(bh->section_entry[0].type) &
1158 ICE_METADATA_BUF)
1159 last = true;
1160 }
1161
1162 bh = (struct ice_buf_hdr *)(bufs + i);
1163
1164 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
1165 &offset, &info, NULL);
1166
1167 /* Save AQ status from download package */
1168 hw->pkg_dwnld_status = hw->adminq.sq_last_status;
1169 if (status) {
1170 ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
1171 status, offset, info);
1172 break;
1173 }
1174
1175 if (last)
1176 break;
1177 }
1178
1179 if (!status) {
1180 status = ice_set_vlan_mode(hw);
1181 if (status)
1182 ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
1183 status);
1184 }
1185
1186 ice_release_global_cfg_lock(hw);
1187
1188 return status;
1189 }
1190
1191 /**
1192 * ice_aq_get_pkg_info_list
1193 * @hw: pointer to the hardware structure
1194 * @pkg_info: the buffer which will receive the information list
1195 * @buf_size: the size of the pkg_info information buffer
1196 * @cd: pointer to command details structure or NULL
1197 *
1198 * Get Package Info List (0x0C43)
1199 */
1200 static enum ice_status
1201 ice_aq_get_pkg_info_list(struct ice_hw *hw,
1202 struct ice_aqc_get_pkg_info_resp *pkg_info,
1203 u16 buf_size, struct ice_sq_cd *cd)
1204 {
1205 struct ice_aq_desc desc;
1206
1207 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1208 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1209
1210 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1211 }
1212
1213 /**
1214 * ice_download_pkg
1215 * @hw: pointer to the hardware structure
1216 * @ice_seg: pointer to the segment of the package to be downloaded
1217 *
1218 * Handles the download of a complete package.
1219 */
1220 static enum ice_status
1221 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1222 {
1223 struct ice_buf_table *ice_buf_tbl;
1224 enum ice_status status;
1225
1226 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1227 ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1228 ice_seg->hdr.seg_format_ver.major,
1229 ice_seg->hdr.seg_format_ver.minor,
1230 ice_seg->hdr.seg_format_ver.update,
1231 ice_seg->hdr.seg_format_ver.draft);
1232
1233 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1234 LE32_TO_CPU(ice_seg->hdr.seg_type),
1235 LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1236
1237 ice_buf_tbl = ice_find_buf_table(ice_seg);
1238
1239 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1240 LE32_TO_CPU(ice_buf_tbl->buf_count));
1241
1242 status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1243 LE32_TO_CPU(ice_buf_tbl->buf_count));
1244
1245 ice_post_pkg_dwnld_vlan_mode_cfg(hw);
1246
1247 return status;
1248 }
1249
1250 /**
1251 * ice_init_pkg_info
1252 * @hw: pointer to the hardware structure
1253 * @pkg_hdr: pointer to the driver's package hdr
1254 *
1255 * Saves off the package details into the HW structure.
1256 */
1257 static enum ice_status
1258 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1259 {
1260 struct ice_generic_seg_hdr *seg_hdr;
1261
1262 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1263 if (!pkg_hdr)
1264 return ICE_ERR_PARAM;
1265
1266 hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810;
1267
1268 ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
1269 hw->pkg_seg_id);
1270
1271 seg_hdr = (struct ice_generic_seg_hdr *)
1272 ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
1273 if (seg_hdr) {
1274 struct ice_meta_sect *meta;
1275 struct ice_pkg_enum state;
1276
1277 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1278
1279 /* Get package information from the Metadata Section */
1280 meta = (struct ice_meta_sect *)
1281 ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1282 ICE_SID_METADATA);
1283 if (!meta) {
1284 ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1285 return ICE_ERR_CFG;
1286 }
1287
1288 hw->pkg_ver = meta->ver;
1289 ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
1290 ICE_NONDMA_TO_NONDMA);
1291
1292 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1293 meta->ver.major, meta->ver.minor, meta->ver.update,
1294 meta->ver.draft, meta->name);
1295
1296 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1297 ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1298 sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
1299
1300 ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1301 seg_hdr->seg_format_ver.major,
1302 seg_hdr->seg_format_ver.minor,
1303 seg_hdr->seg_format_ver.update,
1304 seg_hdr->seg_format_ver.draft,
1305 seg_hdr->seg_id);
1306 } else {
1307 ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1308 return ICE_ERR_CFG;
1309 }
1310
1311 return ICE_SUCCESS;
1312 }
1313
1314 /**
1315 * ice_get_pkg_info
1316 * @hw: pointer to the hardware structure
1317 *
1318 * Store details of the package currently loaded in HW into the HW structure.
1319 */
1320 static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
1321 {
1322 struct ice_aqc_get_pkg_info_resp *pkg_info;
1323 enum ice_status status;
1324 u16 size;
1325 u32 i;
1326
1327 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1328
1329 size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1330 pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1331 if (!pkg_info)
1332 return ICE_ERR_NO_MEMORY;
1333
1334 status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
1335 if (status)
1336 goto init_pkg_free_alloc;
1337
1338 for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
1339 #define ICE_PKG_FLAG_COUNT 4
1340 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1341 u8 place = 0;
1342
1343 if (pkg_info->pkg_info[i].is_active) {
1344 flags[place++] = 'A';
1345 hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1346 hw->active_track_id =
1347 LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
1348 ice_memcpy(hw->active_pkg_name,
1349 pkg_info->pkg_info[i].name,
1350 sizeof(pkg_info->pkg_info[i].name),
1351 ICE_NONDMA_TO_NONDMA);
1352 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1353 }
1354 if (pkg_info->pkg_info[i].is_active_at_boot)
1355 flags[place++] = 'B';
1356 if (pkg_info->pkg_info[i].is_modified)
1357 flags[place++] = 'M';
1358 if (pkg_info->pkg_info[i].is_in_nvm)
1359 flags[place++] = 'N';
1360
1361 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1362 i, pkg_info->pkg_info[i].ver.major,
1363 pkg_info->pkg_info[i].ver.minor,
1364 pkg_info->pkg_info[i].ver.update,
1365 pkg_info->pkg_info[i].ver.draft,
1366 pkg_info->pkg_info[i].name, flags);
1367 }
1368
1369 init_pkg_free_alloc:
1370 ice_free(hw, pkg_info);
1371
1372 return status;
1373 }
1374
1375 /**
1376 * ice_verify_pkg - verify package
1377 * @pkg: pointer to the package buffer
1378 * @len: size of the package buffer
1379 *
1380 * Verifies various attributes of the package file, including length, format
1381 * version, and the requirement of at least one segment.
1382 */
1383 static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1384 {
1385 u32 seg_count;
1386 u32 i;
1387
1388 if (len < ice_struct_size(pkg, seg_offset, 1))
1389 return ICE_ERR_BUF_TOO_SHORT;
1390
1391 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1392 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1393 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1394 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1395 return ICE_ERR_CFG;
1396
1397 /* pkg must have at least one segment */
1398 seg_count = LE32_TO_CPU(pkg->seg_count);
1399 if (seg_count < 1)
1400 return ICE_ERR_CFG;
1401
1402 /* make sure segment array fits in package length */
1403 if (len < ice_struct_size(pkg, seg_offset, seg_count))
1404 return ICE_ERR_BUF_TOO_SHORT;
1405
1406 /* all segments must fit within length */
1407 for (i = 0; i < seg_count; i++) {
1408 u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
1409 struct ice_generic_seg_hdr *seg;
1410
1411 /* segment header must fit */
1412 if (len < off + sizeof(*seg))
1413 return ICE_ERR_BUF_TOO_SHORT;
1414
1415 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1416
1417 /* segment body must fit */
1418 if (len < off + LE32_TO_CPU(seg->seg_size))
1419 return ICE_ERR_BUF_TOO_SHORT;
1420 }
1421
1422 return ICE_SUCCESS;
1423 }
1424
1425 /**
1426 * ice_free_seg - free package segment pointer
1427 * @hw: pointer to the hardware structure
1428 *
1429  * Frees the package segment pointer in the proper manner, depending on whether
1430  * the segment was allocated or just the passed-in pointer was stored.
1431 */
1432 void ice_free_seg(struct ice_hw *hw)
1433 {
1434 if (hw->pkg_copy) {
1435 ice_free(hw, hw->pkg_copy);
1436 hw->pkg_copy = NULL;
1437 hw->pkg_size = 0;
1438 }
1439 hw->seg = NULL;
1440 }
1441
1442 /**
1443 * ice_init_pkg_regs - initialize additional package registers
1444 * @hw: pointer to the hardware structure
1445 */
1446 static void ice_init_pkg_regs(struct ice_hw *hw)
1447 {
1448 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1449 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1450 #define ICE_SW_BLK_IDX 0
1451 if (hw->dcf_enabled)
1452 return;
1453
1454 /* setup Switch block input mask, which is 48-bits in two parts */
1455 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1456 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1457 }
1458
1459 /**
1460 * ice_chk_pkg_version - check package version for compatibility with driver
1461 * @pkg_ver: pointer to a version structure to check
1462 *
1463 * Check to make sure that the package about to be downloaded is compatible with
1464 * the driver. To be compatible, the major and minor components of the package
1465 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1466 * definitions.
1467 */
1468 static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1469 {
1470 if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
1471 pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
1472 return ICE_ERR_NOT_SUPPORTED;
1473
1474 return ICE_SUCCESS;
1475 }
1476
1477 /**
1478 * ice_chk_pkg_compat
1479 * @hw: pointer to the hardware structure
1480 * @ospkg: pointer to the package hdr
1481 * @seg: pointer to the package segment hdr
1482 *
1483  * This function checks the package version compatibility with the driver and NVM.
1484 */
1485 static enum ice_status
1486 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1487 struct ice_seg **seg)
1488 {
1489 struct ice_aqc_get_pkg_info_resp *pkg;
1490 enum ice_status status;
1491 u16 size;
1492 u32 i;
1493
1494 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1495
1496 /* Check package version compatibility */
1497 status = ice_chk_pkg_version(&hw->pkg_ver);
1498 if (status) {
1499 ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1500 return status;
1501 }
1502
1503 /* find ICE segment in given package */
1504 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
1505 ospkg);
1506 if (!*seg) {
1507 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1508 return ICE_ERR_CFG;
1509 }
1510
1511 /* Check if FW is compatible with the OS package */
1512 size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
1513 pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1514 if (!pkg)
1515 return ICE_ERR_NO_MEMORY;
1516
1517 status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
1518 if (status)
1519 goto fw_ddp_compat_free_alloc;
1520
1521 for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
1522 /* loop till we find the NVM package */
1523 if (!pkg->pkg_info[i].is_in_nvm)
1524 continue;
1525 if ((*seg)->hdr.seg_format_ver.major !=
1526 pkg->pkg_info[i].ver.major ||
1527 (*seg)->hdr.seg_format_ver.minor >
1528 pkg->pkg_info[i].ver.minor) {
1529 status = ICE_ERR_FW_DDP_MISMATCH;
1530 ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1531 }
1532 /* done processing NVM package so break */
1533 break;
1534 }
1535 fw_ddp_compat_free_alloc:
1536 ice_free(hw, pkg);
1537 return status;
1538 }
1539
1540 /**
1541 * ice_sw_fv_handler
1542 * @sect_type: section type
1543 * @section: pointer to section
1544 * @index: index of the field vector entry to be returned
1545 * @offset: ptr to variable that receives the offset in the field vector table
1546 *
1547 * This is a callback function that can be passed to ice_pkg_enum_entry.
1548  * This function treats the given section as being of type ice_sw_fv_section and
1549  * enumerates its offset field. "offset" is an index into the field vector table.
1550 */
1551 static void *
1552 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1553 {
1554 struct ice_sw_fv_section *fv_section =
1555 (struct ice_sw_fv_section *)section;
1556
1557 if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1558 return NULL;
1559 if (index >= LE16_TO_CPU(fv_section->count))
1560 return NULL;
1561 if (offset)
1562 /* "index" passed in to this function is relative to a given
1563 		 * 4k block. To get the true index into the field vector
1564 		 * table, add the relative index to the base_offset
1565 * field of this section
1566 */
1567 *offset = LE16_TO_CPU(fv_section->base_offset) + index;
1568 return fv_section->fv + index;
1569 }
1570
1571 /**
1572  * ice_get_prof_index_max - get the max used profile index
1573 * @hw: pointer to the HW struct
1574 *
1575  * Calling this function will find the max index of the profiles in use and
1576  * store that index in struct ice_switch_info *switch_info in the HW
1577  * structure for later use.
1578 */
1579 static int ice_get_prof_index_max(struct ice_hw *hw)
1580 {
1581 u16 prof_index = 0, j, max_prof_index = 0;
1582 struct ice_pkg_enum state;
1583 struct ice_seg *ice_seg;
1584 bool flag = false;
1585 struct ice_fv *fv;
1586 u32 offset;
1587
1588 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1589
1590 if (!hw->seg)
1591 return ICE_ERR_PARAM;
1592
1593 ice_seg = hw->seg;
1594
1595 do {
1596 fv = (struct ice_fv *)
1597 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1598 &offset, ice_sw_fv_handler);
1599 if (!fv)
1600 break;
1601 ice_seg = NULL;
1602
1603 		/* in a profile that is not used, the prot_id is set to 0xff
1604 * and the off is set to 0x1ff for all the field vectors.
1605 */
1606 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1607 if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1608 fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1609 flag = true;
1610 if (flag && prof_index > max_prof_index)
1611 max_prof_index = prof_index;
1612
1613 prof_index++;
1614 flag = false;
1615 } while (fv);
1616
1617 hw->switch_info->max_used_prof_index = max_prof_index;
1618
1619 return ICE_SUCCESS;
1620 }
1621
1622 /**
1623 * ice_init_pkg - initialize/download package
1624 * @hw: pointer to the hardware structure
1625 * @buf: pointer to the package buffer
1626 * @len: size of the package buffer
1627 *
1628 * This function initializes a package. The package contains HW tables
1629 * required to do packet processing. First, the function extracts package
1630 * information such as version. Then it finds the ice configuration segment
1631 * within the package; this function then saves a copy of the segment pointer
1632 * within the supplied package buffer. Next, the function will cache any hints
1633 * from the package, followed by downloading the package itself. Note, that if
1634 * a previous PF driver has already downloaded the package successfully, then
1635 * the current driver will not have to download the package again.
1636 *
1637 * The local package contents will be used to query default behavior and to
1638 * update specific sections of the HW's version of the package (e.g. to update
1639 * the parse graph to understand new protocols).
1640 *
1641 * This function stores a pointer to the package buffer memory, and it is
1642 * expected that the supplied buffer will not be freed immediately. If the
1643 * package buffer needs to be freed, such as when read from a file, use
1644 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1645 * case.
1646 */
1647 enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1648 {
1649 struct ice_pkg_hdr *pkg;
1650 enum ice_status status;
1651 struct ice_seg *seg;
1652
1653 if (!buf || !len)
1654 return ICE_ERR_PARAM;
1655
1656 pkg = (struct ice_pkg_hdr *)buf;
1657 status = ice_verify_pkg(pkg, len);
1658 if (status) {
1659 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1660 status);
1661 return status;
1662 }
1663
1664 /* initialize package info */
1665 status = ice_init_pkg_info(hw, pkg);
1666 if (status)
1667 return status;
1668
1669 /* before downloading the package, check package version for
1670 * compatibility with driver
1671 */
1672 status = ice_chk_pkg_compat(hw, pkg, &seg);
1673 if (status)
1674 return status;
1675
1676 /* initialize package hints and then download package */
1677 ice_init_pkg_hints(hw, seg);
1678 status = ice_download_pkg(hw, seg);
1679 if (status == ICE_ERR_AQ_NO_WORK) {
1680 ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1681 status = ICE_SUCCESS;
1682 }
1683
1684 /* Get information on the package currently loaded in HW, then make sure
1685 * the driver is compatible with this version.
1686 */
1687 if (!status) {
1688 status = ice_get_pkg_info(hw);
1689 if (!status)
1690 status = ice_chk_pkg_version(&hw->active_pkg_ver);
1691 }
1692
1693 if (!status) {
1694 hw->seg = seg;
1695 /* on successful package download update other required
1696 * registers to support the package and fill HW tables
1697 * with package content.
1698 */
1699 ice_init_pkg_regs(hw);
1700 ice_fill_blk_tbls(hw);
1701 ice_fill_hw_ptype(hw);
1702 ice_get_prof_index_max(hw);
1703 } else {
1704 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1705 status);
1706 }
1707
1708 return status;
1709 }
1710
1711 /**
1712 * ice_copy_and_init_pkg - initialize/download a copy of the package
1713 * @hw: pointer to the hardware structure
1714 * @buf: pointer to the package buffer
1715 * @len: size of the package buffer
1716 *
1717 * This function copies the package buffer, and then calls ice_init_pkg() to
1718 * initialize the copied package contents.
1719 *
1720 * The copying is necessary if the package buffer supplied is constant, or if
1721 * the memory may disappear shortly after calling this function.
1722 *
1723 * If the package buffer resides in the data segment and can be modified, the
1724 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1725 *
1726 * However, if the package buffer needs to be copied first, such as when being
1727 * read from a file, the caller should use ice_copy_and_init_pkg().
1728 *
1729 * This function will first copy the package buffer, before calling
1730 * ice_init_pkg(). The caller is free to immediately destroy the original
1731 * package buffer, as the new copy will be managed by this function and
1732 * related routines.
1733 */
1734 enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1735 {
1736 enum ice_status status;
1737 u8 *buf_copy;
1738
1739 if (!buf || !len)
1740 return ICE_ERR_PARAM;
1741
1742 buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
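	/* if the allocation failed, the NULL buf_copy is caught by the !buf
	 * parameter check at the top of ice_init_pkg()
	 */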
1743
1744 status = ice_init_pkg(hw, buf_copy, len);
1745 if (status) {
1746 /* Free the copy, since we failed to initialize the package */
1747 ice_free(hw, buf_copy);
1748 } else {
1749 /* Track the copied pkg so we can free it later */
1750 hw->pkg_copy = buf_copy;
1751 hw->pkg_size = len;
1752 }
1753
1754 return status;
1755 }
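
/* Illustrative usage sketch (not part of the driver build; wrapped in #if 0).
 * It shows the division of labor between ice_init_pkg() and
 * ice_copy_and_init_pkg() described above. The function name and the
 * fw_data/fw_size variables are hypothetical.
 */
#if 0
static enum ice_status my_load_ddp_pkg(struct ice_hw *hw, const u8 *fw_data,
				       u32 fw_size)
{
	/* fw_data was read from a file and will be freed by the caller as
	 * soon as this function returns, so the driver must keep its own
	 * copy: use ice_copy_and_init_pkg(), not ice_init_pkg().
	 */
	return ice_copy_and_init_pkg(hw, fw_data, fw_size);
}
#endif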
1756
1757 /**
1758 * ice_pkg_buf_alloc
1759 * @hw: pointer to the HW structure
1760 *
1761 * Allocates a package buffer and returns a pointer to the buffer header.
1762 * Note: all package contents must be in Little Endian form.
1763 */
1764 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1765 {
1766 struct ice_buf_build *bld;
1767 struct ice_buf_hdr *buf;
1768
1769 bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1770 if (!bld)
1771 return NULL;
1772
1773 buf = (struct ice_buf_hdr *)bld;
1774 buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1775 section_entry));
1776 return bld;
1777 }
1778
1779 /**
1780 * ice_get_sw_prof_type - determine switch profile type
1781 * @hw: pointer to the HW structure
1782 * @fv: pointer to the switch field vector
1783 */
1784 static enum ice_prof_type
1785 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
1786 {
1787 u16 i;
1788 bool valid_prof = false;
1789
1790 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1791 if (fv->ew[i].off != ICE_NAN_OFFSET)
1792 valid_prof = true;
1793
1794 /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1795 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1796 fv->ew[i].off == ICE_VNI_OFFSET)
1797 return ICE_PROF_TUN_UDP;
1798
1799 /* GRE tunnel will have GRE protocol */
1800 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1801 return ICE_PROF_TUN_GRE;
1802
1803 /* PPPOE tunnel will have PPPOE protocol */
1804 if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
1805 return ICE_PROF_TUN_PPPOE;
1806 }
1807
1808 return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
1809 }
1810
1811 /**
1812 * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1813 * @hw: pointer to hardware structure
1814 * @req_profs: type of profiles requested
1815 * @bm: pointer to memory for returning the bitmap of field vectors
1816 */
1817 void
1818 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1819 ice_bitmap_t *bm)
1820 {
1821 struct ice_pkg_enum state;
1822 struct ice_seg *ice_seg;
1823 struct ice_fv *fv;
1824
1825 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1826 ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1827 ice_seg = hw->seg;
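	/* pass the real segment only on the first ice_pkg_enum_entry() call;
	 * later iterations pass NULL to continue the same enumeration
	 */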
1828 do {
1829 enum ice_prof_type prof_type;
1830 u32 offset;
1831
1832 fv = (struct ice_fv *)
1833 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1834 &offset, ice_sw_fv_handler);
1835 ice_seg = NULL;
1836
1837 if (fv) {
1838 /* Determine field vector type */
1839 prof_type = ice_get_sw_prof_type(hw, fv);
1840
1841 if (req_profs & prof_type)
1842 ice_set_bit((u16)offset, bm);
1843 }
1844 } while (fv);
1845 }
1846
1847 /**
1848 * ice_get_sw_fv_list
1849 * @hw: pointer to the HW structure
1850 * @prot_ids: field vector to search for with a given protocol ID
1851 * @ids_cnt: lookup/protocol count
1852 * @bm: bitmap of field vectors to consider
1853 * @fv_list: Head of a list
1854 *
1855 * Finds all the field vector entries from switch block that contain
1856 * a given protocol ID and returns a list of structures of type
1857 * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1858 * definition and profile ID information
1859 * NOTE: The caller of the function is responsible for freeing the memory
1860 * allocated for every list entry.
1861 */
1862 enum ice_status
1863 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
1864 ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1865 {
1866 struct ice_sw_fv_list_entry *fvl;
1867 struct ice_sw_fv_list_entry *tmp;
1868 struct ice_pkg_enum state;
1869 struct ice_seg *ice_seg;
1870 struct ice_fv *fv;
1871 u32 offset;
1872
1873 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1874
1875 if (!ids_cnt || !hw->seg)
1876 return ICE_ERR_PARAM;
1877
1878 ice_seg = hw->seg;
1879 do {
1880 u16 i;
1881
1882 fv = (struct ice_fv *)
1883 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1884 &offset, ice_sw_fv_handler);
1885 if (!fv)
1886 break;
1887 ice_seg = NULL;
1888
1889 /* If field vector is not in the bitmap list, then skip this
1890 * profile.
1891 */
1892 if (!ice_is_bit_set(bm, (u16)offset))
1893 continue;
1894
1895 for (i = 0; i < ids_cnt; i++) {
1896 int j;
1897
1898 /* This code assumes that if a switch field vector line
1899 * has a matching protocol, then this line will contain
1900 * the entries necessary to represent every field in
1901 * that protocol header.
1902 */
1903 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1904 if (fv->ew[j].prot_id == prot_ids[i])
1905 break;
1906 if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1907 break;
1908 if (i + 1 == ids_cnt) {
1909 fvl = (struct ice_sw_fv_list_entry *)
1910 ice_malloc(hw, sizeof(*fvl));
1911 if (!fvl)
1912 goto err;
1913 fvl->fv_ptr = fv;
1914 fvl->profile_id = offset;
1915 LIST_ADD(&fvl->list_entry, fv_list);
1916 break;
1917 }
1918 }
1919 } while (fv);
1920 if (LIST_EMPTY(fv_list))
1921 return ICE_ERR_CFG;
1922 return ICE_SUCCESS;
1923
1924 err:
1925 LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1926 list_entry) {
1927 LIST_DEL(&fvl->list_entry);
1928 ice_free(hw, fvl);
1929 }
1930
1931 return ICE_ERR_NO_MEMORY;
1932 }
1933
1934 /**
1935 * ice_init_prof_result_bm - Initialize the profile result index bitmap
1936 * @hw: pointer to hardware structure
1937 */
1938 void ice_init_prof_result_bm(struct ice_hw *hw)
1939 {
1940 struct ice_pkg_enum state;
1941 struct ice_seg *ice_seg;
1942 struct ice_fv *fv;
1943
1944 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1945
1946 if (!hw->seg)
1947 return;
1948
1949 ice_seg = hw->seg;
1950 do {
1951 u32 off;
1952 u16 i;
1953
1954 fv = (struct ice_fv *)
1955 ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1956 &off, ice_sw_fv_handler);
1957 ice_seg = NULL;
1958 if (!fv)
1959 break;
1960
1961 ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1962 ICE_MAX_FV_WORDS);
1963
1964 /* Determine empty field vector indices, these can be
1965 * used for recipe results. Skip index 0, since it is
1966 * always used for Switch ID.
1967 */
1968 for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1969 if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1970 fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1971 ice_set_bit(i,
1972 hw->switch_info->prof_res_bm[off]);
1973 } while (fv);
1974 }
1975
1976 /**
1977 * ice_pkg_buf_free
1978 * @hw: pointer to the HW structure
1979 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1980 *
1981 * Frees a package buffer
1982 */
1983 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1984 {
1985 ice_free(hw, bld);
1986 }
1987
1988 /**
1989 * ice_pkg_buf_reserve_section
1990 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1991 * @count: the number of sections to reserve
1992 *
1993 * Reserves one or more section table entries in a package buffer. This routine
1994 * can be called multiple times, as long as all calls are made before calling
1995 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1996 * has been called, the number of sections that can be allocated can no
1997 * longer be increased; not using all reserved sections is fine, but this will
1998 * result in some wasted space in the buffer.
1999 * Note: all package contents must be in Little Endian form.
2000 */
2001 static enum ice_status
2002 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
2003 {
2004 struct ice_buf_hdr *buf;
2005 u16 section_count;
2006 u16 data_end;
2007
2008 if (!bld)
2009 return ICE_ERR_PARAM;
2010
2011 buf = (struct ice_buf_hdr *)&bld->buf;
2012
2013 /* already an active section, can't increase table size */
2014 section_count = LE16_TO_CPU(buf->section_count);
2015 if (section_count > 0)
2016 return ICE_ERR_CFG;
2017
2018 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
2019 return ICE_ERR_CFG;
2020 bld->reserved_section_table_entries += count;
2021
2022 data_end = LE16_TO_CPU(buf->data_end) +
2023 FLEX_ARRAY_SIZE(buf, section_entry, count);
2024 buf->data_end = CPU_TO_LE16(data_end);
2025
2026 return ICE_SUCCESS;
2027 }
2028
2029 /**
2030 * ice_pkg_buf_alloc_section
2031 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2032 * @type: the section type value
2033 * @size: the size of the section to reserve (in bytes)
2034 *
2035 * Reserves memory in the buffer for a section's content and updates the
2036 * buffer's status accordingly. This routine returns a pointer to the first
2037 * byte of the section start within the buffer, which is used to fill in the
2038 * section contents.
2039 * Note: all package contents must be in Little Endian form.
2040 */
2041 static void *
2042 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
2043 {
2044 struct ice_buf_hdr *buf;
2045 u16 sect_count;
2046 u16 data_end;
2047
2048 if (!bld || !type || !size)
2049 return NULL;
2050
2051 buf = (struct ice_buf_hdr *)&bld->buf;
2052
2053 /* check for enough space left in buffer */
2054 data_end = LE16_TO_CPU(buf->data_end);
2055
2056 /* section start must align on 4 byte boundary */
2057 data_end = ICE_ALIGN(data_end, 4);
2058
2059 if ((data_end + size) > ICE_MAX_S_DATA_END)
2060 return NULL;
2061
2062 /* check for more available section table entries */
2063 sect_count = LE16_TO_CPU(buf->section_count);
2064 if (sect_count < bld->reserved_section_table_entries) {
2065 void *section_ptr = ((u8 *)buf) + data_end;
2066
2067 buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
2068 buf->section_entry[sect_count].size = CPU_TO_LE16(size);
2069 buf->section_entry[sect_count].type = CPU_TO_LE32(type);
2070
2071 data_end += size;
2072 buf->data_end = CPU_TO_LE16(data_end);
2073
2074 buf->section_count = CPU_TO_LE16(sect_count + 1);
2075 return section_ptr;
2076 }
2077
2078 /* no free section table entries */
2079 return NULL;
2080 }
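
/* Illustrative sketch (not compiled; wrapped in #if 0) of the buffer-build
 * sequence implied above: reserve all section table entries first, then
 * allocate the individual sections. The two-section Rx/Tx parser layout
 * mirrors the tunnel and DVM update code later in this file; the local
 * variable names and the err label are hypothetical.
 */
#if 0
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	struct ice_buf_build *bld = ice_pkg_buf_alloc(hw);
	enum ice_status status;

	if (!bld)
		return ICE_ERR_NO_MEMORY;

	/* all reservations must precede the first allocation */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto err;

	sect_rx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					  ice_struct_size(sect_rx, tcam, 1));
	sect_tx = (struct ice_boost_tcam_section *)
		ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					  ice_struct_size(sect_tx, tcam, 1));
	if (!sect_rx || !sect_tx)
		goto err;

	/* ... fill both sections, then push the update to HW ... */
	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
err:
	ice_pkg_buf_free(hw, bld);
#endif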
2081
2082 /**
2083 * ice_pkg_buf_alloc_single_section
2084 * @hw: pointer to the HW structure
2085 * @type: the section type value
2086 * @size: the size of the section to reserve (in bytes)
2087 * @section: returns pointer to the section
2088 *
2089 * Allocates a package buffer with a single section.
2090 * Note: all package contents must be in Little Endian form.
2091 */
2092 struct ice_buf_build *
2093 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
2094 void **section)
2095 {
2096 struct ice_buf_build *buf;
2097
2098 if (!section)
2099 return NULL;
2100
2101 buf = ice_pkg_buf_alloc(hw);
2102 if (!buf)
2103 return NULL;
2104
2105 if (ice_pkg_buf_reserve_section(buf, 1))
2106 goto ice_pkg_buf_alloc_single_section_err;
2107
2108 *section = ice_pkg_buf_alloc_section(buf, type, size);
2109 if (!*section)
2110 goto ice_pkg_buf_alloc_single_section_err;
2111
2112 return buf;
2113
2114 ice_pkg_buf_alloc_single_section_err:
2115 ice_pkg_buf_free(hw, buf);
2116 return NULL;
2117 }
2118
2119 /**
2120 * ice_pkg_buf_get_active_sections
2121 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2122 *
2123 * Returns the number of active sections. Before using the package buffer
2124 * in an update package command, the caller should make sure that there is at
2125 * least one active section - otherwise, the buffer is not legal and should
2126 * not be used.
2127 * Note: all package contents must be in Little Endian form.
2128 */
2129 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
2130 {
2131 struct ice_buf_hdr *buf;
2132
2133 if (!bld)
2134 return 0;
2135
2136 buf = (struct ice_buf_hdr *)&bld->buf;
2137 return LE16_TO_CPU(buf->section_count);
2138 }
2139
2140 /**
2141 * ice_pkg_buf
2142 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
2143 *
2144 * Return a pointer to the buffer's header
2145 */
2146 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
2147 {
2148 if (!bld)
2149 return NULL;
2150
2151 return &bld->buf;
2152 }
2153
2154 /**
2155 * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
2156 * @hw: pointer to the HW structure
2157 * @port: port to search for
2158 * @index: optionally returns index
2159 *
2160 * Returns whether a port is already in use as a tunnel, and optionally its
2161 * index
2162 */
2163 static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
2164 {
2165 u16 i;
2166
2167 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2168 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2169 if (index)
2170 *index = i;
2171 return true;
2172 }
2173
2174 return false;
2175 }
2176
2177 /**
2178 * ice_tunnel_port_in_use
2179 * @hw: pointer to the HW structure
2180 * @port: port to search for
2181 * @index: optionally returns index
2182 *
2183 * Returns whether a port is already in use as a tunnel, and optionally its
2184 * index
2185 */
2186 bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
2187 {
2188 bool res;
2189
2190 ice_acquire_lock(&hw->tnl_lock);
2191 res = ice_tunnel_port_in_use_hlpr(hw, port, index);
2192 ice_release_lock(&hw->tnl_lock);
2193
2194 return res;
2195 }
2196
2197 /**
2198 * ice_tunnel_get_type
2199 * @hw: pointer to the HW structure
2200 * @port: port to search for
2201 * @type: returns tunnel type
2202 *
2203 * For a given port number, this function returns the type of tunnel.
2204 */
2205 bool
2206 ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
2207 {
2208 bool res = false;
2209 u16 i;
2210
2211 ice_acquire_lock(&hw->tnl_lock);
2212
2213 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2214 if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
2215 *type = hw->tnl.tbl[i].type;
2216 res = true;
2217 break;
2218 }
2219
2220 ice_release_lock(&hw->tnl_lock);
2221
2222 return res;
2223 }
2224
2225 /**
2226 * ice_find_free_tunnel_entry
2227 * @hw: pointer to the HW structure
2228 * @type: tunnel type
2229 * @index: optionally returns index
2230 *
2231 * Returns whether there is a free tunnel entry, and optionally its index
2232 */
2233 static bool
2234 ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
2235 u16 *index)
2236 {
2237 u16 i;
2238
2239 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2240 if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
2241 hw->tnl.tbl[i].type == type) {
2242 if (index)
2243 *index = i;
2244 return true;
2245 }
2246
2247 return false;
2248 }
2249
2250 /**
2251 * ice_get_open_tunnel_port - retrieve an open tunnel port
2252 * @hw: pointer to the HW structure
2253 * @type: tunnel type (TNL_ALL will return any open port)
2254 * @port: returns open port
2255 */
2256 bool
2257 ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
2258 u16 *port)
2259 {
2260 bool res = false;
2261 u16 i;
2262
2263 ice_acquire_lock(&hw->tnl_lock);
2264
2265 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2266 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2267 (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
2268 *port = hw->tnl.tbl[i].port;
2269 res = true;
2270 break;
2271 }
2272
2273 ice_release_lock(&hw->tnl_lock);
2274
2275 return res;
2276 }
2277
2278 /**
2279 * ice_upd_dvm_boost_entry
2280 * @hw: pointer to the HW structure
2281 * @entry: pointer to double vlan boost entry info
2282 */
2283 static enum ice_status
2284 ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
2285 {
2286 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2287 enum ice_status status = ICE_ERR_MAX_LIMIT;
2288 struct ice_buf_build *bld;
2289 u8 val, dc, nm;
2290
2291 bld = ice_pkg_buf_alloc(hw);
2292 if (!bld)
2293 return ICE_ERR_NO_MEMORY;
2294
2295 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2296 if (ice_pkg_buf_reserve_section(bld, 2))
2297 goto ice_upd_dvm_boost_entry_err;
2298
2299 sect_rx = (struct ice_boost_tcam_section *)
2300 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2301 ice_struct_size(sect_rx, tcam, 1));
2302 if (!sect_rx)
2303 goto ice_upd_dvm_boost_entry_err;
2304 sect_rx->count = CPU_TO_LE16(1);
2305
2306 sect_tx = (struct ice_boost_tcam_section *)
2307 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2308 ice_struct_size(sect_tx, tcam, 1));
2309 if (!sect_tx)
2310 goto ice_upd_dvm_boost_entry_err;
2311 sect_tx->count = CPU_TO_LE16(1);
2312
2313 /* copy original boost entry to update package buffer */
2314 ice_memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam),
2315 ICE_NONDMA_TO_NONDMA);
2316
2317 /* re-write the don't care and never match bits accordingly */
2318 if (entry->enable) {
2319 /* all bits are don't care */
2320 val = 0x00;
2321 dc = 0xFF;
2322 nm = 0x00;
2323 } else {
2324 /* disable, one never match bit, the rest are don't care */
2325 val = 0x00;
2326 dc = 0xF7;
2327 nm = 0x08;
2328 }
2329
2330 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2331 &val, NULL, &dc, &nm, 0, sizeof(u8));
2332
2333 /* exact copy of entry to Tx section entry */
2334 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2335 ICE_NONDMA_TO_NONDMA);
2336
2337 status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
2338
2339 ice_upd_dvm_boost_entry_err:
2340 ice_pkg_buf_free(hw, bld);
2341
2342 return status;
2343 }
2344
2345 /**
2346 * ice_set_dvm_boost_entries
2347 * @hw: pointer to the HW structure
2348 *
2349 * Enable double vlan by updating the appropriate boost tcam entries.
2350 */
2351 enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw)
2352 {
2353 enum ice_status status;
2354 u16 i;
2355
2356 for (i = 0; i < hw->dvm_upd.count; i++) {
2357 status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
2358 if (status)
2359 return status;
2360 }
2361
2362 return ICE_SUCCESS;
2363 }
2364
2365 /**
2366 * ice_create_tunnel
2367 * @hw: pointer to the HW structure
2368 * @type: type of tunnel
2369 * @port: port of tunnel to create
2370 *
2371 * Create a tunnel by updating the parse graph in the parser. We do that by
2372 * creating a package buffer with the tunnel info and issuing an update package
2373 * command.
2374 */
2375 enum ice_status
2376 ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
2377 {
2378 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2379 enum ice_status status = ICE_ERR_MAX_LIMIT;
2380 struct ice_buf_build *bld;
2381 u16 index;
2382
2383 ice_acquire_lock(&hw->tnl_lock);
2384
2385 if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
2386 hw->tnl.tbl[index].ref++;
2387 status = ICE_SUCCESS;
2388 goto ice_create_tunnel_end;
2389 }
2390
2391 if (!ice_find_free_tunnel_entry(hw, type, &index)) {
2392 status = ICE_ERR_OUT_OF_RANGE;
2393 goto ice_create_tunnel_end;
2394 }
2395
2396 bld = ice_pkg_buf_alloc(hw);
2397 if (!bld) {
2398 status = ICE_ERR_NO_MEMORY;
2399 goto ice_create_tunnel_end;
2400 }
2401
2402 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2403 if (ice_pkg_buf_reserve_section(bld, 2))
2404 goto ice_create_tunnel_err;
2405
2406 sect_rx = (struct ice_boost_tcam_section *)
2407 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2408 ice_struct_size(sect_rx, tcam, 1));
2409 if (!sect_rx)
2410 goto ice_create_tunnel_err;
2411 sect_rx->count = CPU_TO_LE16(1);
2412
2413 sect_tx = (struct ice_boost_tcam_section *)
2414 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2415 ice_struct_size(sect_tx, tcam, 1));
2416 if (!sect_tx)
2417 goto ice_create_tunnel_err;
2418 sect_tx->count = CPU_TO_LE16(1);
2419
2420 /* copy original boost entry to update package buffer */
2421 ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2422 sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
2423
2424 /* over-write the never-match dest port key bits with the encoded port
2425 * bits
2426 */
2427 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2428 (u8 *)&port, NULL, NULL, NULL,
2429 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2430 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2431
2432 /* exact copy of entry to Tx section entry */
2433 ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
2434 ICE_NONDMA_TO_NONDMA);
2435
2436 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2437 if (!status) {
2438 hw->tnl.tbl[index].port = port;
2439 hw->tnl.tbl[index].in_use = true;
2440 hw->tnl.tbl[index].ref = 1;
2441 }
2442
2443 ice_create_tunnel_err:
2444 ice_pkg_buf_free(hw, bld);
2445
2446 ice_create_tunnel_end:
2447 ice_release_lock(&hw->tnl_lock);
2448
2449 return status;
2450 }
2451
2452 /**
2453 * ice_destroy_tunnel
2454 * @hw: pointer to the HW structure
2455 * @port: port of tunnel to destroy (ignored if the all parameter is true)
2456 * @all: flag that states to destroy all tunnels
2457 *
2458 * Destroys a tunnel or all tunnels by creating an update package buffer
2459 * targeting the specific updates requested and then performing an update
2460 * package.
2461 */
2462 enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
2463 {
2464 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2465 enum ice_status status = ICE_ERR_MAX_LIMIT;
2466 struct ice_buf_build *bld;
2467 u16 count = 0;
2468 u16 index;
2469 u16 size;
2470 u16 i, j;
2471
2472 ice_acquire_lock(&hw->tnl_lock);
2473
2474 if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
2475 if (hw->tnl.tbl[index].ref > 1) {
2476 hw->tnl.tbl[index].ref--;
2477 status = ICE_SUCCESS;
2478 goto ice_destroy_tunnel_end;
2479 }
2480
2481 /* determine count */
2482 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2483 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2484 (all || hw->tnl.tbl[i].port == port))
2485 count++;
2486
2487 if (!count) {
2488 status = ICE_ERR_PARAM;
2489 goto ice_destroy_tunnel_end;
2490 }
2491
2492 /* size of section - there is at least one entry */
2493 size = ice_struct_size(sect_rx, tcam, count);
2494
2495 bld = ice_pkg_buf_alloc(hw);
2496 if (!bld) {
2497 status = ICE_ERR_NO_MEMORY;
2498 goto ice_destroy_tunnel_end;
2499 }
2500
2501 /* allocate 2 sections, one for Rx parser, one for Tx parser */
2502 if (ice_pkg_buf_reserve_section(bld, 2))
2503 goto ice_destroy_tunnel_err;
2504
2505 sect_rx = (struct ice_boost_tcam_section *)
2506 ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2507 size);
2508 if (!sect_rx)
2509 goto ice_destroy_tunnel_err;
2510 sect_rx->count = CPU_TO_LE16(count);
2511
2512 sect_tx = (struct ice_boost_tcam_section *)
2513 ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2514 size);
2515 if (!sect_tx)
2516 goto ice_destroy_tunnel_err;
2517 sect_tx->count = CPU_TO_LE16(count);
2518
2519 /* copy original boost entry to update package buffer, one copy to Rx
2520 * section, another copy to the Tx section
2521 */
2522 for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2523 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
2524 (all || hw->tnl.tbl[i].port == port)) {
2525 ice_memcpy(sect_rx->tcam + j,
2526 hw->tnl.tbl[i].boost_entry,
2527 sizeof(*sect_rx->tcam),
2528 ICE_NONDMA_TO_NONDMA);
2529 ice_memcpy(sect_tx->tcam + j,
2530 hw->tnl.tbl[i].boost_entry,
2531 sizeof(*sect_tx->tcam),
2532 ICE_NONDMA_TO_NONDMA);
2533 hw->tnl.tbl[i].marked = true;
2534 j++;
2535 }
2536
2537 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2538 if (!status)
2539 for (i = 0; i < hw->tnl.count &&
2540 i < ICE_TUNNEL_MAX_ENTRIES; i++)
2541 if (hw->tnl.tbl[i].marked) {
2542 hw->tnl.tbl[i].ref = 0;
2543 hw->tnl.tbl[i].port = 0;
2544 hw->tnl.tbl[i].in_use = false;
2545 hw->tnl.tbl[i].marked = false;
2546 }
2547
2548 ice_destroy_tunnel_err:
2549 ice_pkg_buf_free(hw, bld);
2550
2551 ice_destroy_tunnel_end:
2552 ice_release_lock(&hw->tnl_lock);
2553
2554 return status;
2555 }
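
/* Illustrative sketch (not compiled; wrapped in #if 0): pairing
 * ice_create_tunnel() with ice_destroy_tunnel(). The VXLAN port value 4789
 * is only an example. Create bumps the reference count if the port is
 * already open; destroy drops the count and only removes the boost TCAM
 * entry once it reaches zero.
 */
#if 0
	enum ice_status status;

	status = ice_create_tunnel(hw, TNL_VXLAN, 4789);
	if (!status) {
		/* ... use the tunnel ... */
		status = ice_destroy_tunnel(hw, 4789, false);
	}
#endif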
2556
2557 /**
2558 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
2559 * @hw: pointer to the hardware structure
2560 * @blk: hardware block
2561 * @prof: profile ID
2562 * @fv_idx: field vector word index
2563 * @prot: variable to receive the protocol ID
2564 * @off: variable to receive the protocol offset
2565 */
2566 enum ice_status
2567 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2568 u8 *prot, u16 *off)
2569 {
2570 struct ice_fv_word *fv_ext;
2571
2572 if (prof >= hw->blk[blk].es.count)
2573 return ICE_ERR_PARAM;
2574
2575 if (fv_idx >= hw->blk[blk].es.fvw)
2576 return ICE_ERR_PARAM;
2577
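	/* es.t is a flat array of es.count * es.fvw field-vector words; the
	 * vector for a given profile starts at prof * es.fvw
	 */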
2578 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2579
2580 *prot = fv_ext[fv_idx].prot_id;
2581 *off = fv_ext[fv_idx].off;
2582
2583 return ICE_SUCCESS;
2584 }
2585
2586 /* PTG Management */
2587
2588 /**
2589 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
2590 * @hw: pointer to the hardware structure
2591 * @blk: HW block
2592 * @ptype: the ptype to search for
2593 * @ptg: pointer to variable that receives the PTG
2594 *
2595 * This function will search the PTGs for a particular ptype, returning the
2596 * PTG ID that contains it through the PTG parameter, with the value of
2597 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
2598 */
2599 static enum ice_status
2600 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2601 {
2602 if (ptype >= ICE_XLT1_CNT || !ptg)
2603 return ICE_ERR_PARAM;
2604
2605 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2606 return ICE_SUCCESS;
2607 }
2608
2609 /**
2610 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
2611 * @hw: pointer to the hardware structure
2612 * @blk: HW block
2613 * @ptg: the PTG to allocate
2614 *
2615 * This function allocates a given packet type group ID specified by the PTG
2616 * parameter.
2617 */
2618 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2619 {
2620 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2621 }
2622
2623 /**
2624 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
2625 * @hw: pointer to the hardware structure
2626 * @blk: HW block
2627 * @ptype: the ptype to remove
2628 * @ptg: the PTG to remove the ptype from
2629 *
2630 * This function will remove the ptype from the specific PTG, and move it to
2631 * the default PTG (ICE_DEFAULT_PTG).
2632 */
2633 static enum ice_status
2634 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2635 {
2636 struct ice_ptg_ptype **ch;
2637 struct ice_ptg_ptype *p;
2638
2639 if (ptype > ICE_XLT1_CNT - 1)
2640 return ICE_ERR_PARAM;
2641
2642 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2643 return ICE_ERR_DOES_NOT_EXIST;
2644
2645 /* Should not happen if .in_use is set, bad config */
2646 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2647 return ICE_ERR_CFG;
2648
2649 /* find the ptype within this PTG, and bypass the link over it */
2650 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2651 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2652 while (p) {
2653 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2654 *ch = p->next_ptype;
2655 break;
2656 }
2657
2658 ch = &p->next_ptype;
2659 p = p->next_ptype;
2660 }
2661
2662 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2663 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2664
2665 return ICE_SUCCESS;
2666 }
2667
2668 /**
2669 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
2670 * @hw: pointer to the hardware structure
2671 * @blk: HW block
2672 * @ptype: the ptype to add or move
2673 * @ptg: the PTG to add or move the ptype to
2674 *
2675 * This function will either add or move a ptype to a particular PTG depending
2676 * on whether the ptype is already part of another group. Note that using
2677 * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
2678 * default PTG.
2679 */
2680 static enum ice_status
2681 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2682 {
2683 enum ice_status status;
2684 u8 original_ptg;
2685
2686 if (ptype > ICE_XLT1_CNT - 1)
2687 return ICE_ERR_PARAM;
2688
2689 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2690 return ICE_ERR_DOES_NOT_EXIST;
2691
2692 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2693 if (status)
2694 return status;
2695
2696 /* Is ptype already in the correct PTG? */
2697 if (original_ptg == ptg)
2698 return ICE_SUCCESS;
2699
2700 /* Remove from original PTG and move back to the default PTG */
2701 if (original_ptg != ICE_DEFAULT_PTG)
2702 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2703
2704 /* Moving to default PTG? Then we're done with this request */
2705 if (ptg == ICE_DEFAULT_PTG)
2706 return ICE_SUCCESS;
2707
2708 /* Add ptype to PTG at beginning of list */
2709 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2710 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2711 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2712 &hw->blk[blk].xlt1.ptypes[ptype];
2713
2714 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2715 hw->blk[blk].xlt1.t[ptype] = ptg;
2716
2717 return ICE_SUCCESS;
2718 }
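
/* Illustrative sketch (not compiled; wrapped in #if 0): the block, ptype and
 * PTG values are hypothetical. A ptype is first unlinked from whatever group
 * holds it, then linked into the destination group; moving to
 * ICE_DEFAULT_PTG is just the removal step.
 */
#if 0
	ice_ptg_alloc_val(hw, ICE_BLK_RSS, 5);		/* mark PTG 5 in use */
	ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 17, 5);	/* ptype 17 -> PTG 5 */
	ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, 17, ICE_DEFAULT_PTG);
#endif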
2719
2720 /* Block / table size info */
2721 struct ice_blk_size_details {
2722 u16 xlt1; /* # XLT1 entries */
2723 u16 xlt2; /* # XLT2 entries */
2724 u16 prof_tcam; /* # profile ID TCAM entries */
2725 u16 prof_id; /* # profile IDs */
2726 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
2727 u16 prof_redir; /* # profile redirection entries */
2728 u16 es; /* # extraction sequence entries */
2729 u16 fvw; /* # field vector words */
2730 u8 overwrite; /* overwrite existing entries allowed */
2731 u8 reverse; /* reverse FV order */
2732 };
2733
2734 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2735 /**
2736 * Table Definitions
2737 * XLT1 - Number of entries in XLT1 table
2738 * XLT2 - Number of entries in XLT2 table
2739 * TCAM - Number of entries in the Profile ID TCAM table
 * PID  - Number of Profile IDs
2740 * CDID - Control Domain ID of the hardware block
2741 * PRED - Number of entries in the Profile Redirection Table
2742 * FV - Number of entries in the Field Vector
2743 * FVW - Width (in WORDs) of the Field Vector
2744 * OVR - Overwrite existing table entries
2745 * REV - Reverse FV
2746 */
2747 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
2748 /* Overwrite , Reverse FV */
2749 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2750 false, false },
2751 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2752 false, false },
2753 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2754 false, true },
2755 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2756 true, true },
2757 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2758 false, false },
2759 };
2760
2761 enum ice_sid_all {
2762 ICE_SID_XLT1_OFF = 0,
2763 ICE_SID_XLT2_OFF,
2764 ICE_SID_PR_OFF,
2765 ICE_SID_PR_REDIR_OFF,
2766 ICE_SID_ES_OFF,
2767 ICE_SID_OFF_COUNT,
2768 };
2769
2770 /* Characteristic handling */
2771
2772 /**
2773 * ice_match_prop_lst - determine if properties of two lists match
2774 * @list1: first properties list
2775 * @list2: second properties list
2776 *
2777 * Count, cookies and the order must match in order to be considered equivalent.
2778 */
2779 static bool
2780 ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
2781 {
2782 struct ice_vsig_prof *tmp1;
2783 struct ice_vsig_prof *tmp2;
2784 u16 chk_count = 0;
2785 u16 count = 0;
2786
2787 /* compare counts */
2788 LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
2789 count++;
2790 LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
2791 chk_count++;
2792 if (!count || count != chk_count)
2793 return false;
2794
2795 tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
2796 tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
2797
2798 /* profile cookies must compare, and in the exact same order to take
2799 * into account priority
2800 */
2801 while (count--) {
2802 if (tmp2->profile_cookie != tmp1->profile_cookie)
2803 return false;
2804
2805 tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
2806 tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
2807 }
2808
2809 return true;
2810 }
2811
2812 /* VSIG Management */
2813
2814 /**
2815 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2816 * @hw: pointer to the hardware structure
2817 * @blk: HW block
2818 * @vsi: VSI of interest
2819 * @vsig: pointer to receive the VSI group
2820 *
2821 * This function will look up the VSI entry in the XLT2 list and return
2822 * the VSI group it is associated with.
2823 */
2824 enum ice_status
2825 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2826 {
2827 if (!vsig || vsi >= ICE_MAX_VSI)
2828 return ICE_ERR_PARAM;
2829
2830 /* As long as there's a default or valid VSIG associated with the input
2831 * VSI, the function returns success. Any handling of the VSIG will be
2832 * done by the following add, update or remove functions.
2833 */
2834 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2835
2836 return ICE_SUCCESS;
2837 }
2838
2839 /**
2840 * ice_vsig_alloc_val - allocate a new VSIG by value
2841 * @hw: pointer to the hardware structure
2842 * @blk: HW block
2843 * @vsig: the VSIG to allocate
2844 *
2845 * This function will allocate a given VSIG specified by the VSIG parameter.
2846 */
2847 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2848 {
2849 u16 idx = vsig & ICE_VSIG_IDX_M;
2850
2851 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2852 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2853 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2854 }
2855
2856 return ICE_VSIG_VALUE(idx, hw->pf_id);
2857 }
2858
2859 /**
2860 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2861 * @hw: pointer to the hardware structure
2862 * @blk: HW block
2863 *
2864 * This function will iterate through the VSIG list and mark the first
2865 * unused entry for the new VSIG entry as used and return that value.
2866 */
2867 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2868 {
2869 u16 i;
2870
2871 for (i = 1; i < ICE_MAX_VSIGS; i++)
2872 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2873 return ice_vsig_alloc_val(hw, blk, i);
2874
2875 return ICE_DEFAULT_VSIG;
2876 }
2877
2878 /**
2879 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2880 * @hw: pointer to the hardware structure
2881 * @blk: HW block
2882 * @chs: characteristic list
2883 * @vsig: returns the VSIG with the matching profiles, if found
2884 *
2885 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2886 * a group have the same characteristic set. To check whether there exists a
2887 * VSIG with the same characteristics as the input set, this
2888 * function will iterate through the XLT2 list and return the VSIG that has a
2889 * matching configuration. In order to make sure that priorities are accounted
2890 * for, the list must match exactly, including the order in which the
2891 * characteristics are listed.
2892 */
2893 static enum ice_status
2894 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2895 struct LIST_HEAD_TYPE *chs, u16 *vsig)
2896 {
2897 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2898 u16 i;
2899
2900 for (i = 0; i < xlt2->count; i++)
2901 if (xlt2->vsig_tbl[i].in_use &&
2902 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2903 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2904 return ICE_SUCCESS;
2905 }
2906
2907 return ICE_ERR_DOES_NOT_EXIST;
2908 }
2909
2910 /**
2911 * ice_vsig_free - free VSI group
2912 * @hw: pointer to the hardware structure
2913 * @blk: HW block
2914 * @vsig: VSIG to remove
2915 *
2916 * The function will remove all VSIs associated with the input VSIG, move
2917 * them to the default VSIG, and then mark the VSIG as available.
2918 */
2919 static enum ice_status
2920 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2921 {
2922 struct ice_vsig_prof *dtmp, *del;
2923 struct ice_vsig_vsi *vsi_cur;
2924 u16 idx;
2925
2926 idx = vsig & ICE_VSIG_IDX_M;
2927 if (idx >= ICE_MAX_VSIGS)
2928 return ICE_ERR_PARAM;
2929
2930 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2931 return ICE_ERR_DOES_NOT_EXIST;
2932
2933 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2934
2935 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2936 /* If the VSIG has at least 1 VSI then iterate through the
2937 * list and remove the VSIs before deleting the group.
2938 */
2939 if (vsi_cur) {
2940 /* remove all vsis associated with this VSIG XLT2 entry */
2941 do {
2942 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2943
2944 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2945 vsi_cur->changed = 1;
2946 vsi_cur->next_vsi = NULL;
2947 vsi_cur = tmp;
2948 } while (vsi_cur);
2949
2950 /* NULL terminate head of VSI list */
2951 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2952 }
2953
2954 /* free characteristic list */
2955 LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
2956 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2957 ice_vsig_prof, list) {
2958 LIST_DEL(&del->list);
2959 ice_free(hw, del);
2960 }
2961
2962 /* if VSIG characteristic list was cleared for reset
2963 * re-initialize the list head
2964 */
2965 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2966
2967 return ICE_SUCCESS;
2968 }
2969
2970 /**
2971 * ice_vsig_remove_vsi - remove VSI from VSIG
2972 * @hw: pointer to the hardware structure
2973 * @blk: HW block
2974 * @vsi: VSI to remove
2975 * @vsig: VSI group to remove from
2976 *
2977 * The function will remove the input VSI from its VSI group and move it
2978 * to the DEFAULT_VSIG.
2979 */
2980 static enum ice_status
2981 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2982 {
2983 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2984 u16 idx;
2985
2986 idx = vsig & ICE_VSIG_IDX_M;
2987
2988 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2989 return ICE_ERR_PARAM;
2990
2991 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2992 return ICE_ERR_DOES_NOT_EXIST;
2993
2994 /* entry already in default VSIG, don't have to remove */
2995 if (idx == ICE_DEFAULT_VSIG)
2996 return ICE_SUCCESS;
2997
2998 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2999 if (!(*vsi_head))
3000 return ICE_ERR_CFG;
3001
3002 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
3003 vsi_cur = (*vsi_head);
3004
3005 /* iterate the VSI list, skip over the entry to be removed */
3006 while (vsi_cur) {
3007 if (vsi_tgt == vsi_cur) {
3008 (*vsi_head) = vsi_cur->next_vsi;
3009 break;
3010 }
3011 vsi_head = &vsi_cur->next_vsi;
3012 vsi_cur = vsi_cur->next_vsi;
3013 }
3014
3015 /* verify if VSI was removed from group list */
3016 if (!vsi_cur)
3017 return ICE_ERR_DOES_NOT_EXIST;
3018
3019 vsi_cur->vsig = ICE_DEFAULT_VSIG;
3020 vsi_cur->changed = 1;
3021 vsi_cur->next_vsi = NULL;
3022
3023 return ICE_SUCCESS;
3024 }
3025
3026 /**
3027 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
3028 * @hw: pointer to the hardware structure
3029 * @blk: HW block
3030 * @vsi: VSI to move
3031 * @vsig: destination VSI group
3032 *
3033 * This function will move or add the input VSI to the target VSIG.
3034 * The function will find the original VSIG the VSI belongs to and
3035 * move the entry to the DEFAULT_VSIG, update the original VSIG and
3036 * then move entry to the new VSIG.
3037 */
3038 static enum ice_status
3039 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3040 {
3041 struct ice_vsig_vsi *tmp;
3042 enum ice_status status;
3043 u16 orig_vsig, idx;
3044
3045 idx = vsig & ICE_VSIG_IDX_M;
3046
3047 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
3048 return ICE_ERR_PARAM;
3049
3050 /* if the VSIG is not in use and is not the default VSIG, then this
3051 * VSIG doesn't exist.
3052 */
3053 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
3054 vsig != ICE_DEFAULT_VSIG)
3055 return ICE_ERR_DOES_NOT_EXIST;
3056
3057 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3058 if (status)
3059 return status;
3060
3061 /* no update required if vsigs match */
3062 if (orig_vsig == vsig)
3063 return ICE_SUCCESS;
3064
3065 if (orig_vsig != ICE_DEFAULT_VSIG) {
3066 /* remove entry from orig_vsig and add to default VSIG */
3067 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
3068 if (status)
3069 return status;
3070 }
3071
3072 if (idx == ICE_DEFAULT_VSIG)
3073 return ICE_SUCCESS;
3074
3075 /* Create VSI entry and add VSIG and prop_mask values */
3076 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
3077 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
3078
3079 /* Add new entry to the head of the VSIG list */
3080 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3081 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
3082 &hw->blk[blk].xlt2.vsis[vsi];
3083 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
3084 hw->blk[blk].xlt2.t[vsi] = vsig;
3085
3086 return ICE_SUCCESS;
3087 }
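
/* Illustrative sketch (not compiled; wrapped in #if 0) of the typical VSIG
 * flow; the chs, vsi and status locals are hypothetical. Look for an
 * existing VSIG with the wanted characteristics first, allocate a fresh one
 * only when no duplicate exists, then move the VSI into it.
 */
#if 0
	u16 vsig;

	if (ice_find_dup_props_vsig(hw, ICE_BLK_RSS, &chs, &vsig)) {
		vsig = ice_vsig_alloc(hw, ICE_BLK_RSS);
		if (vsig == ICE_DEFAULT_VSIG)
			return ICE_ERR_MAX_LIMIT;	/* table is full */
	}
	status = ice_vsig_add_mv_vsi(hw, ICE_BLK_RSS, vsi, vsig);
#endif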
3088
3089 /**
3090 * ice_prof_has_mask_idx - determine if profile index masking is identical
3091 * @hw: pointer to the hardware structure
3092 * @blk: HW block
3093 * @prof: profile to check
3094 * @idx: profile index to check
3095 * @mask: mask to match
 *
 * A mask of 0x0000 or 0xffff means "no masking": the profile must then have
 * no enabled mask for this index. Any other value requires an in-use mask at
 * this index whose value matches exactly.
 */
3097 static bool
3098 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
3099 u16 mask)
3100 {
3101 bool expect_no_mask = false;
3102 bool found = false;
3103 bool match = false;
3104 u16 i;
3105
3106 /* If mask is 0x0000 or 0xffff, then there is no masking */
3107 if (mask == 0 || mask == 0xffff)
3108 expect_no_mask = true;
3109
3110 /* Scan the enabled masks on this profile for the specified idx */
3111 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
3112 hw->blk[blk].masks.count; i++)
3113 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
3114 if (hw->blk[blk].masks.masks[i].in_use &&
3115 hw->blk[blk].masks.masks[i].idx == idx) {
3116 found = true;
3117 if (hw->blk[blk].masks.masks[i].mask == mask)
3118 match = true;
3119 break;
3120 }
3121
3122 if (expect_no_mask) {
3123 if (found)
3124 return false;
3125 } else {
3126 if (!match)
3127 return false;
3128 }
3129
3130 return true;
3131 }
3132
3133 /**
3134 * ice_prof_has_mask - determine if profile masking is identical
3135 * @hw: pointer to the hardware structure
3136 * @blk: HW block
3137 * @prof: profile to check
3138 * @masks: masks to match
3139 */
3140 static bool
3141 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
3142 {
3143 u16 i;
3144
3145 /* es->mask_ena[prof] will have the mask */
3146 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3147 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
3148 return false;
3149
3150 return true;
3151 }
3152
3153 /**
3154 * ice_find_prof_id_with_mask - find profile ID for a given field vector
3155 * @hw: pointer to the hardware structure
3156 * @blk: HW block
3157 * @fv: field vector to search for
3158 * @masks: masks for fv
3159 * @prof_id: receives the profile ID
3160 */
3161 static enum ice_status
3162 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
3163 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
3164 {
3165 struct ice_es *es = &hw->blk[blk].es;
3166 u8 i;
3167
3168 /* For FD and RSS, we don't want to reuse an existing profile with the
3169 * same field vector and mask, as that would cause rule interference.
3170 */
3171 if (blk == ICE_BLK_FD || blk == ICE_BLK_RSS)
3172 return ICE_ERR_DOES_NOT_EXIST;
3173
3174 for (i = 0; i < (u8)es->count; i++) {
3175 u16 off = i * es->fvw;
3176
3177 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
3178 continue;
3179
3180 /* check if masks settings are the same for this profile */
3181 if (masks && !ice_prof_has_mask(hw, blk, i, masks))
3182 continue;
3183
3184 *prof_id = i;
3185 return ICE_SUCCESS;
3186 }
3187
3188 return ICE_ERR_DOES_NOT_EXIST;
3189 }
3190
3191 /**
3192 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
3193 * @blk: the block type
3194 * @rsrc_type: pointer to variable to receive the resource type
3195 */
3196 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3197 {
3198 switch (blk) {
3199 case ICE_BLK_SW:
3200 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
3201 break;
3202 case ICE_BLK_ACL:
3203 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
3204 break;
3205 case ICE_BLK_FD:
3206 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
3207 break;
3208 case ICE_BLK_RSS:
3209 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
3210 break;
3211 case ICE_BLK_PE:
3212 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
3213 break;
3214 default:
3215 return false;
3216 }
3217 return true;
3218 }
3219
3220 /**
3221 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
3222 * @blk: the block type
3223 * @rsrc_type: pointer to variable to receive the resource type
3224 */
3225 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3226 {
3227 switch (blk) {
3228 case ICE_BLK_SW:
3229 *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
3230 break;
3231 case ICE_BLK_ACL:
3232 *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
3233 break;
3234 case ICE_BLK_FD:
3235 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
3236 break;
3237 case ICE_BLK_RSS:
3238 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
3239 break;
3240 case ICE_BLK_PE:
3241 *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
3242 break;
3243 default:
3244 return false;
3245 }
3246 return true;
3247 }
3248
3249 /**
3250 * ice_alloc_tcam_ent - allocate hardware TCAM entry
3251 * @hw: pointer to the HW struct
3252 * @blk: the block to allocate the TCAM for
3253 * @btm: true to allocate from bottom of table, false to allocate from top
3254 * @tcam_idx: pointer to variable to receive the TCAM entry
3255 *
3256 * This function allocates a new entry in a Profile ID TCAM for a specific
3257 * block.
3258 */
3259 static enum ice_status
3260 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
3261 u16 *tcam_idx)
3262 {
3263 u16 res_type;
3264
3265 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3266 return ICE_ERR_PARAM;
3267
3268 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
3269 }
3270
3271 /**
3272 * ice_free_tcam_ent - free hardware TCAM entry
3273 * @hw: pointer to the HW struct
3274 * @blk: the block from which to free the TCAM entry
3275 * @tcam_idx: the TCAM entry to free
3276 *
3277 * This function frees an entry in a Profile ID TCAM for a specific block.
3278 */
3279 static enum ice_status
3280 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
3281 {
3282 u16 res_type;
3283
3284 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3285 return ICE_ERR_PARAM;
3286
3287 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
3288 }
3289
3290 /**
3291 * ice_alloc_prof_id - allocate profile ID
3292 * @hw: pointer to the HW struct
3293 * @blk: the block to allocate the profile ID for
3294 * @prof_id: pointer to variable to receive the profile ID
3295 *
3296 * This function allocates a new profile ID, which also corresponds to a Field
3297 * Vector (Extraction Sequence) entry.
3298 */
3299 static enum ice_status
3300 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
3301 {
3302 enum ice_status status;
3303 u16 res_type;
3304 u16 get_prof;
3305
3306 if (!ice_prof_id_rsrc_type(blk, &res_type))
3307 return ICE_ERR_PARAM;
3308
3309 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
3310 if (!status)
3311 *prof_id = (u8)get_prof;
3312
3313 return status;
3314 }
3315
3316 /**
3317 * ice_free_prof_id - free profile ID
3318 * @hw: pointer to the HW struct
3319 * @blk: the block from which to free the profile ID
3320 * @prof_id: the profile ID to free
3321 *
3322 * This function frees a profile ID, which also corresponds to a Field Vector.
3323 */
3324 static enum ice_status
3325 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3326 {
3327 u16 tmp_prof_id = (u16)prof_id;
3328 u16 res_type;
3329
3330 if (!ice_prof_id_rsrc_type(blk, &res_type))
3331 return ICE_ERR_PARAM;
3332
3333 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
3334 }
3335
3336 /**
3337 * ice_prof_inc_ref - increment reference count for profile
3338 * @hw: pointer to the HW struct
3339 * @blk: the block from which to free the profile ID
3340 * @prof_id: the profile ID for which to increment the reference count
3341 */
3342 static enum ice_status
3343 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3344 {
3345 if (prof_id > hw->blk[blk].es.count)
3346 return ICE_ERR_PARAM;
3347
3348 hw->blk[blk].es.ref_count[prof_id]++;
3349
3350 return ICE_SUCCESS;
3351 }
3352
3353 /**
3354 * ice_write_prof_mask_reg - write profile mask register
3355 * @hw: pointer to the HW struct
3356 * @blk: hardware block
3357 * @mask_idx: mask index
3358 * @idx: index of the FV which will use the mask
3359 * @mask: the 16-bit mask
3360 */
3361 static void
3362 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
3363 u16 idx, u16 mask)
3364 {
3365 u32 offset;
3366 u32 val;
3367
3368 switch (blk) {
3369 case ICE_BLK_RSS:
3370 offset = GLQF_HMASK(mask_idx);
3371 val = (idx << GLQF_HMASK_MSK_INDEX_S) &
3372 GLQF_HMASK_MSK_INDEX_M;
3373 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
3374 break;
3375 case ICE_BLK_FD:
3376 offset = GLQF_FDMASK(mask_idx);
3377 val = (idx << GLQF_FDMASK_MSK_INDEX_S) &
3378 GLQF_FDMASK_MSK_INDEX_M;
3379 val |= (mask << GLQF_FDMASK_MASK_S) &
3380 GLQF_FDMASK_MASK_M;
3381 break;
3382 default:
3383 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3384 blk);
3385 return;
3386 }
3387
3388 wr32(hw, offset, val);
3389 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
3390 blk, idx, offset, val);
3391 }
3392
3393 /**
3394 * ice_write_prof_mask_enable_res - write profile mask enable register
3395 * @hw: pointer to the HW struct
3396 * @blk: hardware block
3397 * @prof_id: profile ID
3398 * @enable_mask: enable mask
3399 */
3400 static void
3401 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
3402 u16 prof_id, u32 enable_mask)
3403 {
3404 u32 offset;
3405
3406 switch (blk) {
3407 case ICE_BLK_RSS:
3408 offset = GLQF_HMASK_SEL(prof_id);
3409 break;
3410 case ICE_BLK_FD:
3411 offset = GLQF_FDMASK_SEL(prof_id);
3412 break;
3413 default:
3414 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3415 blk);
3416 return;
3417 }
3418
3419 wr32(hw, offset, enable_mask);
3420 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
3421 blk, prof_id, offset, enable_mask);
3422 }
3423
3424 /**
3425 * ice_init_prof_masks - initialize profile masks for a block
3426 * @hw: pointer to the HW struct
3427 * @blk: hardware block
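 *
 * The available mask registers are split evenly across functions: each PF
 * owns ICE_PROF_MASK_COUNT / num_funcs consecutive registers, starting at
 * logical_pf_id * per_pf. As a purely hypothetical example, with 32 mask
 * registers and 8 functions each PF would own 32 / 8 = 4 registers, and
 * logical PF 2 would own indices 8 through 11.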
3428 */
3429 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
3430 {
3431 u16 per_pf;
3432 u16 i;
3433
3434 ice_init_lock(&hw->blk[blk].masks.lock);
3435
3436 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
3437
3438 hw->blk[blk].masks.count = per_pf;
3439 hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
3440
3441 ice_memset(hw->blk[blk].masks.masks, 0,
3442 sizeof(hw->blk[blk].masks.masks), ICE_NONDMA_MEM);
3443
3444 for (i = hw->blk[blk].masks.first;
3445 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3446 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3447 }
3448
3449 /**
3450 * ice_init_all_prof_masks - initialize all profile masks
3451 * @hw: pointer to the HW struct
3452 */
3453 void ice_init_all_prof_masks(struct ice_hw *hw)
3454 {
3455 ice_init_prof_masks(hw, ICE_BLK_RSS);
3456 ice_init_prof_masks(hw, ICE_BLK_FD);
3457 }
3458
3459 /**
3460 * ice_alloc_prof_mask - allocate profile mask
3461 * @hw: pointer to the HW struct
3462 * @blk: hardware block
3463 * @idx: index of FV which will use the mask
3464 * @mask: the 16-bit mask
3465 * @mask_idx: variable to receive the mask index
3466 */
3467 static enum ice_status
3468 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
3469 u16 *mask_idx)
3470 {
3471 bool found_unused = false, found_copy = false;
3472 enum ice_status status = ICE_ERR_MAX_LIMIT;
3473 u16 unused_idx = 0, copy_idx = 0;
3474 u16 i;
3475
3476 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3477 return ICE_ERR_PARAM;
3478
3479 ice_acquire_lock(&hw->blk[blk].masks.lock);
3480
3481 for (i = hw->blk[blk].masks.first;
3482 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3483 if (hw->blk[blk].masks.masks[i].in_use) {
3484 /* if mask is in use and it exactly duplicates the
3485 * desired mask and index, then it can be reused
3486 */
3487 if (hw->blk[blk].masks.masks[i].mask == mask &&
3488 hw->blk[blk].masks.masks[i].idx == idx) {
3489 found_copy = true;
3490 copy_idx = i;
3491 break;
3492 }
3493 } else {
3494 /* save off unused index, but keep searching in case
3495 * there is an exact match later on
3496 */
3497 if (!found_unused) {
3498 found_unused = true;
3499 unused_idx = i;
3500 }
3501 }
3502
3503 if (found_copy)
3504 i = copy_idx;
3505 else if (found_unused)
3506 i = unused_idx;
3507 else
3508 goto err_ice_alloc_prof_mask;
3509
3510 /* update mask for a new entry */
3511 if (found_unused) {
3512 hw->blk[blk].masks.masks[i].in_use = true;
3513 hw->blk[blk].masks.masks[i].mask = mask;
3514 hw->blk[blk].masks.masks[i].idx = idx;
3515 hw->blk[blk].masks.masks[i].ref = 0;
3516 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3517 }
3518
3519 hw->blk[blk].masks.masks[i].ref++;
3520 *mask_idx = i;
3521 status = ICE_SUCCESS;
3522
3523 err_ice_alloc_prof_mask:
3524 ice_release_lock(&hw->blk[blk].masks.lock);
3525
3526 return status;
3527 }
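
/* Hypothetical usage sketch for the allocator above: requesting the
 * same (FV index, mask) pair twice returns the same shared entry with
 * its ref count at 2, so every allocation must be balanced by a free.
 */
#if 0
static void ice_example_mask_reuse(struct ice_hw *hw)
{
	u16 a, b;

	if (!ice_alloc_prof_mask(hw, ICE_BLK_RSS, 5, 0x00FF, &a) &&
	    !ice_alloc_prof_mask(hw, ICE_BLK_RSS, 5, 0x00FF, &b)) {
		/* a == b here and the entry's ref count is 2 */
		ice_free_prof_mask(hw, ICE_BLK_RSS, b);
		ice_free_prof_mask(hw, ICE_BLK_RSS, a);
	}
}
#endif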
3528
3529 /**
3530 * ice_free_prof_mask - free profile mask
3531 * @hw: pointer to the HW struct
3532 * @blk: hardware block
3533 * @mask_idx: index of mask
3534 */
3535 static enum ice_status
3536 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3537 {
3538 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3539 return ICE_ERR_PARAM;
3540
3541 if (!(mask_idx >= hw->blk[blk].masks.first &&
3542 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3543 return ICE_ERR_DOES_NOT_EXIST;
3544
3545 ice_acquire_lock(&hw->blk[blk].masks.lock);
3546
3547 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3548 goto exit_ice_free_prof_mask;
3549
3550 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3551 hw->blk[blk].masks.masks[mask_idx].ref--;
3552 goto exit_ice_free_prof_mask;
3553 }
3554
3555 /* remove mask */
3556 hw->blk[blk].masks.masks[mask_idx].in_use = false;
3557 hw->blk[blk].masks.masks[mask_idx].mask = 0;
3558 hw->blk[blk].masks.masks[mask_idx].idx = 0;
3559
3560 /* update mask as unused entry */
3561 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
3562 mask_idx);
3563 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3564
3565 exit_ice_free_prof_mask:
3566 ice_release_lock(&hw->blk[blk].masks.lock);
3567
3568 return ICE_SUCCESS;
3569 }
3570
3571 /**
3572 * ice_free_prof_masks - free all profile masks for a profile
3573 * @hw: pointer to the HW struct
3574 * @blk: hardware block
3575 * @prof_id: profile ID
3576 */
3577 static enum ice_status
3578 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3579 {
3580 u32 mask_bm;
3581 u16 i;
3582
3583 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3584 return ICE_ERR_PARAM;
3585
3586 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
3587 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3588 if (mask_bm & BIT(i))
3589 ice_free_prof_mask(hw, blk, i);
3590
3591 return ICE_SUCCESS;
3592 }
3593
3594 /**
3595  * ice_shutdown_prof_masks - clear profile masks and destroy the mask lock
3596 * @hw: pointer to the HW struct
3597 * @blk: hardware block
3598 *
3599 * This should be called before unloading the driver
3600 */
3601 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3602 {
3603 u16 i;
3604
3605 ice_acquire_lock(&hw->blk[blk].masks.lock);
3606
3607 for (i = hw->blk[blk].masks.first;
3608 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3609 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3610
3611 hw->blk[blk].masks.masks[i].in_use = false;
3612 hw->blk[blk].masks.masks[i].idx = 0;
3613 hw->blk[blk].masks.masks[i].mask = 0;
3614 }
3615
3616 ice_release_lock(&hw->blk[blk].masks.lock);
3617 ice_destroy_lock(&hw->blk[blk].masks.lock);
3618 }
3619
3620 /**
3621  * ice_shutdown_all_prof_masks - clear masks and destroy mask locks for all blocks
3622 * @hw: pointer to the HW struct
3623 *
3624 * This should be called before unloading the driver
3625 */
3626 void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3627 {
3628 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3629 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3630 }
3631
3632 /**
3633 * ice_update_prof_masking - set registers according to masking
3634 * @hw: pointer to the HW struct
3635 * @blk: hardware block
3636 * @prof_id: profile ID
3637 * @masks: masks
3638 */
3639 static enum ice_status
3640 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3641 u16 *masks)
3642 {
3643 bool err = false;
3644 u32 ena_mask = 0;
3645 u16 idx;
3646 u16 i;
3647
3648 /* Only support FD and RSS masking, otherwise nothing to be done */
3649 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3650 return ICE_SUCCESS;
3651
3652 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3653 if (masks[i] && masks[i] != 0xFFFF) {
3654 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3655 ena_mask |= BIT(idx);
3656 } else {
3657 /* not enough bitmaps */
3658 err = true;
3659 break;
3660 }
3661 }
3662
3663 if (err) {
3664 /* free any bitmaps we have allocated */
3665 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3666 if (ena_mask & BIT(i))
3667 ice_free_prof_mask(hw, blk, i);
3668
3669 return ICE_ERR_OUT_OF_RANGE;
3670 }
3671
3672 /* enable the masks for this profile */
3673 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3674
3675 /* store enabled masks with profile so that they can be freed later */
3676 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
3677
3678 return ICE_SUCCESS;
3679 }
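
/* Sketch of the mapping performed above, assuming only FV word 3
 * carries a non-trivial mask (0 and 0xFFFF are pass-through): one mask
 * resource is consumed and exactly one bit is set in the profile's
 * enable bitmap.
 */
#if 0
static void ice_example_prof_masking(struct ice_hw *hw, u16 prof_id)
{
	u16 masks[ICE_MAX_FV_WORDS] = { 0 };

	masks[3] = 0x00FF;	/* keep only the low byte of FV word 3 */
	ice_update_prof_masking(hw, ICE_BLK_RSS, prof_id, masks);
	/* hw->blk[ICE_BLK_RSS].es.mask_ena[prof_id] now has one bit set */
}
#endif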
3680
3681 /**
3682 * ice_write_es - write an extraction sequence to hardware
3683 * @hw: pointer to the HW struct
3684 * @blk: the block in which to write the extraction sequence
3685 * @prof_id: the profile ID to write
3686 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
3687 */
3688 static void
3689 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3690 struct ice_fv_word *fv)
3691 {
3692 u16 off;
3693
3694 off = prof_id * hw->blk[blk].es.fvw;
3695 if (!fv) {
3696 ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
3697 sizeof(*fv), ICE_NONDMA_MEM);
3698 hw->blk[blk].es.written[prof_id] = false;
3699 } else {
3700 ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
3701 sizeof(*fv), ICE_NONDMA_TO_NONDMA);
3702 }
3703 }
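
/* The ES table is stored flat: word w of profile p lives at
 * es.t[p * es.fvw + w]. A read-back sketch, for illustration only:
 */
#if 0
static struct ice_fv_word
ice_example_read_es_word(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
			 u16 word)
{
	return hw->blk[blk].es.t[prof_id * hw->blk[blk].es.fvw + word];
}
#endif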
3704
3705 /**
3706 * ice_prof_dec_ref - decrement reference count for profile
3707 * @hw: pointer to the HW struct
3708 * @blk: the block from which to free the profile ID
3709 * @prof_id: the profile ID for which to decrement the reference count
3710 */
3711 static enum ice_status
3712 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3713 {
3714 if (prof_id >= hw->blk[blk].es.count)
3715 return ICE_ERR_PARAM;
3716
3717 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3718 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3719 ice_write_es(hw, blk, prof_id, NULL);
3720 ice_free_prof_masks(hw, blk, prof_id);
3721 return ice_free_prof_id(hw, blk, prof_id);
3722 }
3723 }
3724
3725 return ICE_SUCCESS;
3726 }
3727
3728 /* Block / table section IDs */
3729 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3730 /* SWITCH */
3731 { ICE_SID_XLT1_SW,
3732 ICE_SID_XLT2_SW,
3733 ICE_SID_PROFID_TCAM_SW,
3734 ICE_SID_PROFID_REDIR_SW,
3735 ICE_SID_FLD_VEC_SW
3736 },
3737
3738 /* ACL */
3739 { ICE_SID_XLT1_ACL,
3740 ICE_SID_XLT2_ACL,
3741 ICE_SID_PROFID_TCAM_ACL,
3742 ICE_SID_PROFID_REDIR_ACL,
3743 ICE_SID_FLD_VEC_ACL
3744 },
3745
3746 /* FD */
3747 { ICE_SID_XLT1_FD,
3748 ICE_SID_XLT2_FD,
3749 ICE_SID_PROFID_TCAM_FD,
3750 ICE_SID_PROFID_REDIR_FD,
3751 ICE_SID_FLD_VEC_FD
3752 },
3753
3754 /* RSS */
3755 { ICE_SID_XLT1_RSS,
3756 ICE_SID_XLT2_RSS,
3757 ICE_SID_PROFID_TCAM_RSS,
3758 ICE_SID_PROFID_REDIR_RSS,
3759 ICE_SID_FLD_VEC_RSS
3760 },
3761
3762 /* PE */
3763 { ICE_SID_XLT1_PE,
3764 ICE_SID_XLT2_PE,
3765 ICE_SID_PROFID_TCAM_PE,
3766 ICE_SID_PROFID_REDIR_PE,
3767 ICE_SID_FLD_VEC_PE
3768 }
3769 };
3770
3771 /**
3772 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
3773 * @hw: pointer to the hardware structure
3774 * @blk: the HW block to initialize
3775 */
3776 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3777 {
3778 u16 pt;
3779
3780 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3781 u8 ptg;
3782
3783 ptg = hw->blk[blk].xlt1.t[pt];
3784 if (ptg != ICE_DEFAULT_PTG) {
3785 ice_ptg_alloc_val(hw, blk, ptg);
3786 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3787 }
3788 }
3789 }
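
/* XLT1 is a flat ptype -> PTG map, so the rebuild above amounts to
 * recording membership for every ptype whose PTG is non-default. The
 * equivalent lookup, as a sketch:
 */
#if 0
static u8 ice_example_ptype_to_ptg(struct ice_hw *hw, enum ice_block blk,
				   u16 ptype)
{
	/* ICE_DEFAULT_PTG if the ptype was never assigned */
	return hw->blk[blk].xlt1.t[ptype];
}
#endif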
3790
3791 /**
3792 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
3793 * @hw: pointer to the hardware structure
3794 * @blk: the HW block to initialize
3795 */
3796 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3797 {
3798 u16 vsi;
3799
3800 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3801 u16 vsig;
3802
3803 vsig = hw->blk[blk].xlt2.t[vsi];
3804 if (vsig) {
3805 ice_vsig_alloc_val(hw, blk, vsig);
3806 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3807 /* no changes at this time, since this has been
3808 * initialized from the original package
3809 */
3810 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3811 }
3812 }
3813 }
3814
3815 /**
3816 * ice_init_sw_db - init software database from HW tables
3817 * @hw: pointer to the hardware structure
3818 */
3819 static void ice_init_sw_db(struct ice_hw *hw)
3820 {
3821 u16 i;
3822
3823 for (i = 0; i < ICE_BLK_COUNT; i++) {
3824 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3825 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3826 }
3827 }
3828
3829 /**
3830 * ice_fill_tbl - Reads content of a single table type into database
3831 * @hw: pointer to the hardware structure
3832 * @block_id: Block ID of the table to copy
3833 * @sid: Section ID of the table to copy
3834 *
3835 * Will attempt to read the entire content of a given table of a single block
3836 * into the driver database. We assume that the buffer will always
3837 * be as large as or larger than the data contained in the package. If
3838 * this condition is not met, there is most likely an error in the package
3839 * contents.
3840 */
3841 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3842 {
3843 u32 dst_len, sect_len, offset = 0;
3844 struct ice_prof_redir_section *pr;
3845 struct ice_prof_id_section *pid;
3846 struct ice_xlt1_section *xlt1;
3847 struct ice_xlt2_section *xlt2;
3848 struct ice_sw_fv_section *es;
3849 struct ice_pkg_enum state;
3850 u8 *src, *dst;
3851 void *sect;
3852
3853 /* if the HW segment pointer is null then the first iteration of
3854 * ice_pkg_enum_section() will fail. In this case the HW tables are
3855 * left unfilled and the function simply returns.
3856 */
3857 if (!hw->seg) {
3858 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3859 return;
3860 }
3861
3862 ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
3863
3864 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3865
3866 while (sect) {
3867 switch (sid) {
3868 case ICE_SID_XLT1_SW:
3869 case ICE_SID_XLT1_FD:
3870 case ICE_SID_XLT1_RSS:
3871 case ICE_SID_XLT1_ACL:
3872 case ICE_SID_XLT1_PE:
3873 xlt1 = (struct ice_xlt1_section *)sect;
3874 src = xlt1->value;
3875 sect_len = LE16_TO_CPU(xlt1->count) *
3876 sizeof(*hw->blk[block_id].xlt1.t);
3877 dst = hw->blk[block_id].xlt1.t;
3878 dst_len = hw->blk[block_id].xlt1.count *
3879 sizeof(*hw->blk[block_id].xlt1.t);
3880 break;
3881 case ICE_SID_XLT2_SW:
3882 case ICE_SID_XLT2_FD:
3883 case ICE_SID_XLT2_RSS:
3884 case ICE_SID_XLT2_ACL:
3885 case ICE_SID_XLT2_PE:
3886 xlt2 = (struct ice_xlt2_section *)sect;
3887 src = (_FORCE_ u8 *)xlt2->value;
3888 sect_len = LE16_TO_CPU(xlt2->count) *
3889 sizeof(*hw->blk[block_id].xlt2.t);
3890 dst = (u8 *)hw->blk[block_id].xlt2.t;
3891 dst_len = hw->blk[block_id].xlt2.count *
3892 sizeof(*hw->blk[block_id].xlt2.t);
3893 break;
3894 case ICE_SID_PROFID_TCAM_SW:
3895 case ICE_SID_PROFID_TCAM_FD:
3896 case ICE_SID_PROFID_TCAM_RSS:
3897 case ICE_SID_PROFID_TCAM_ACL:
3898 case ICE_SID_PROFID_TCAM_PE:
3899 pid = (struct ice_prof_id_section *)sect;
3900 src = (u8 *)pid->entry;
3901 sect_len = LE16_TO_CPU(pid->count) *
3902 sizeof(*hw->blk[block_id].prof.t);
3903 dst = (u8 *)hw->blk[block_id].prof.t;
3904 dst_len = hw->blk[block_id].prof.count *
3905 sizeof(*hw->blk[block_id].prof.t);
3906 break;
3907 case ICE_SID_PROFID_REDIR_SW:
3908 case ICE_SID_PROFID_REDIR_FD:
3909 case ICE_SID_PROFID_REDIR_RSS:
3910 case ICE_SID_PROFID_REDIR_ACL:
3911 case ICE_SID_PROFID_REDIR_PE:
3912 pr = (struct ice_prof_redir_section *)sect;
3913 src = pr->redir_value;
3914 sect_len = LE16_TO_CPU(pr->count) *
3915 sizeof(*hw->blk[block_id].prof_redir.t);
3916 dst = hw->blk[block_id].prof_redir.t;
3917 dst_len = hw->blk[block_id].prof_redir.count *
3918 sizeof(*hw->blk[block_id].prof_redir.t);
3919 break;
3920 case ICE_SID_FLD_VEC_SW:
3921 case ICE_SID_FLD_VEC_FD:
3922 case ICE_SID_FLD_VEC_RSS:
3923 case ICE_SID_FLD_VEC_ACL:
3924 case ICE_SID_FLD_VEC_PE:
3925 es = (struct ice_sw_fv_section *)sect;
3926 src = (u8 *)es->fv;
3927 sect_len = (u32)(LE16_TO_CPU(es->count) *
3928 hw->blk[block_id].es.fvw) *
3929 sizeof(*hw->blk[block_id].es.t);
3930 dst = (u8 *)hw->blk[block_id].es.t;
3931 dst_len = (u32)(hw->blk[block_id].es.count *
3932 hw->blk[block_id].es.fvw) *
3933 sizeof(*hw->blk[block_id].es.t);
3934 break;
3935 default:
3936 return;
3937 }
3938
3939 /* if the section offset exceeds destination length, terminate
3940 * table fill.
3941 */
3942 if (offset > dst_len)
3943 return;
3944
3945 /* if the sum of the section size and offset exceeds the destination
3946 * size then we are out of bounds of the HW table size for that PF.
3947 * Change the section length to fill only the remaining table space
3948 * of that PF.
3949 */
3950 if ((offset + sect_len) > dst_len)
3951 sect_len = dst_len - offset;
3952
3953 ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
3954 offset += sect_len;
3955 sect = ice_pkg_enum_section(NULL, &state, sid);
3956 }
3957 }
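
/* The bounds handling above reduces to a simple clamp. For example,
 * with dst_len == 1024, offset == 900 and a 200-byte section, only the
 * remaining 124 bytes are copied:
 */
#if 0
static u32 ice_example_clamp(u32 offset, u32 sect_len, u32 dst_len)
{
	if (offset > dst_len)
		return 0;		/* no destination space left */
	if (offset + sect_len > dst_len)
		sect_len = dst_len - offset;
	return sect_len;		/* 124 in the example above */
}
#endif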
3958
3959 /**
3960 * ice_fill_blk_tbls - Read package context for tables
3961 * @hw: pointer to the hardware structure
3962 *
3963 * Reads the current package contents and populates the driver
3964 * database with the data iteratively for all advanced feature
3965 * blocks. Assumes that the HW tables have been allocated.
3966 */
3967 void ice_fill_blk_tbls(struct ice_hw *hw)
3968 {
3969 u8 i;
3970
3971 for (i = 0; i < ICE_BLK_COUNT; i++) {
3972 enum ice_block blk_id = (enum ice_block)i;
3973
3974 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3975 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3976 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3977 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3978 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3979 }
3980
3981 ice_init_sw_db(hw);
3982 }
3983
3984 /**
3985 * ice_free_prof_map - free profile map
3986 * @hw: pointer to the hardware structure
3987 * @blk_idx: HW block index
3988 */
3989 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3990 {
3991 struct ice_es *es = &hw->blk[blk_idx].es;
3992 struct ice_prof_map *del, *tmp;
3993
3994 ice_acquire_lock(&es->prof_map_lock);
3995 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
3996 ice_prof_map, list) {
3997 LIST_DEL(&del->list);
3998 ice_free(hw, del);
3999 }
4000 INIT_LIST_HEAD(&es->prof_map);
4001 ice_release_lock(&es->prof_map_lock);
4002 }
4003
4004 /**
4005 * ice_free_flow_profs - free flow profile entries
4006 * @hw: pointer to the hardware structure
4007 * @blk_idx: HW block index
4008 */
4009 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
4010 {
4011 struct ice_flow_prof *p, *tmp;
4012
4013 ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
4014 LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
4015 ice_flow_prof, l_entry) {
4016 struct ice_flow_entry *e, *t;
4017
4018 LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
4019 ice_flow_entry, l_entry)
4020 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
4021 ICE_FLOW_ENTRY_HNDL(e));
4022
4023 LIST_DEL(&p->l_entry);
4024 if (p->acts)
4025 ice_free(hw, p->acts);
4026
4027 ice_destroy_lock(&p->entries_lock);
4028 ice_free(hw, p);
4029 }
4030 ice_release_lock(&hw->fl_profs_locks[blk_idx]);
4031
4032 /* if the driver is in reset and tables are being cleared,
4033 * re-initialize the flow profile list heads
4034 */
4035 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
4036 }
4037
4038 /**
4039 * ice_free_vsig_tbl - free complete VSIG table entries
4040 * @hw: pointer to the hardware structure
4041 * @blk: the HW block on which to free the VSIG table entries
4042 */
4043 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
4044 {
4045 u16 i;
4046
4047 if (!hw->blk[blk].xlt2.vsig_tbl)
4048 return;
4049
4050 for (i = 1; i < ICE_MAX_VSIGS; i++)
4051 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
4052 ice_vsig_free(hw, blk, i);
4053 }
4054
4055 /**
4056 * ice_free_hw_tbls - free hardware table memory
4057 * @hw: pointer to the hardware structure
4058 */
4059 void ice_free_hw_tbls(struct ice_hw *hw)
4060 {
4061 struct ice_rss_cfg *r, *rt;
4062 u8 i;
4063
4064 for (i = 0; i < ICE_BLK_COUNT; i++) {
4065 if (hw->blk[i].is_list_init) {
4066 struct ice_es *es = &hw->blk[i].es;
4067
4068 ice_free_prof_map(hw, i);
4069 ice_destroy_lock(&es->prof_map_lock);
4070 ice_free_flow_profs(hw, i);
4071 ice_destroy_lock(&hw->fl_profs_locks[i]);
4072
4073 hw->blk[i].is_list_init = false;
4074 }
4075 ice_free_vsig_tbl(hw, (enum ice_block)i);
4076 ice_free(hw, hw->blk[i].xlt1.ptypes);
4077 ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
4078 ice_free(hw, hw->blk[i].xlt1.t);
4079 ice_free(hw, hw->blk[i].xlt2.t);
4080 ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
4081 ice_free(hw, hw->blk[i].xlt2.vsis);
4082 ice_free(hw, hw->blk[i].prof.t);
4083 ice_free(hw, hw->blk[i].prof_redir.t);
4084 ice_free(hw, hw->blk[i].es.t);
4085 ice_free(hw, hw->blk[i].es.ref_count);
4086 ice_free(hw, hw->blk[i].es.written);
4087 ice_free(hw, hw->blk[i].es.mask_ena);
4088 }
4089
4090 LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
4091 ice_rss_cfg, l_entry) {
4092 LIST_DEL(&r->l_entry);
4093 ice_free(hw, r);
4094 }
4095 ice_destroy_lock(&hw->rss_locks);
4096 if (!hw->dcf_enabled)
4097 ice_shutdown_all_prof_masks(hw);
4098 ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
4099 }
4100
4101 /**
4102 * ice_init_flow_profs - init flow profile locks and list heads
4103 * @hw: pointer to the hardware structure
4104 * @blk_idx: HW block index
4105 */
4106 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
4107 {
4108 ice_init_lock(&hw->fl_profs_locks[blk_idx]);
4109 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
4110 }
4111
4112 /**
4113 * ice_clear_hw_tbls - clear HW tables and flow profiles
4114 * @hw: pointer to the hardware structure
4115 */
4116 void ice_clear_hw_tbls(struct ice_hw *hw)
4117 {
4118 u8 i;
4119
4120 for (i = 0; i < ICE_BLK_COUNT; i++) {
4121 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
4122 struct ice_prof_tcam *prof = &hw->blk[i].prof;
4123 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
4124 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
4125 struct ice_es *es = &hw->blk[i].es;
4126
4127 if (hw->blk[i].is_list_init) {
4128 ice_free_prof_map(hw, i);
4129 ice_free_flow_profs(hw, i);
4130 }
4131
4132 ice_free_vsig_tbl(hw, (enum ice_block)i);
4133
4134 ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
4135 ICE_NONDMA_MEM);
4136 ice_memset(xlt1->ptg_tbl, 0,
4137 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
4138 ICE_NONDMA_MEM);
4139 ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
4140 ICE_NONDMA_MEM);
4141
4142 ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
4143 ICE_NONDMA_MEM);
4144 ice_memset(xlt2->vsig_tbl, 0,
4145 xlt2->count * sizeof(*xlt2->vsig_tbl),
4146 ICE_NONDMA_MEM);
4147 ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
4148 ICE_NONDMA_MEM);
4149
4150 ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
4151 ICE_NONDMA_MEM);
4152 ice_memset(prof_redir->t, 0,
4153 prof_redir->count * sizeof(*prof_redir->t),
4154 ICE_NONDMA_MEM);
4155
4156 ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
4157 ICE_NONDMA_MEM);
4158 ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
4159 ICE_NONDMA_MEM);
4160 ice_memset(es->written, 0, es->count * sizeof(*es->written),
4161 ICE_NONDMA_MEM);
4162 ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena),
4163 ICE_NONDMA_MEM);
4164 }
4165 }
4166
4167 /**
4168 * ice_init_hw_tbls - init hardware table memory
4169 * @hw: pointer to the hardware structure
4170 */
4171 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
4172 {
4173 u8 i;
4174
4175 ice_init_lock(&hw->rss_locks);
4176 INIT_LIST_HEAD(&hw->rss_list_head);
4177 if (!hw->dcf_enabled)
4178 ice_init_all_prof_masks(hw);
4179 for (i = 0; i < ICE_BLK_COUNT; i++) {
4180 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
4181 struct ice_prof_tcam *prof = &hw->blk[i].prof;
4182 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
4183 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
4184 struct ice_es *es = &hw->blk[i].es;
4185 u16 j;
4186
4187 if (hw->blk[i].is_list_init)
4188 continue;
4189
4190 ice_init_flow_profs(hw, i);
4191 ice_init_lock(&es->prof_map_lock);
4192 INIT_LIST_HEAD(&es->prof_map);
4193 hw->blk[i].is_list_init = true;
4194
4195 hw->blk[i].overwrite = blk_sizes[i].overwrite;
4196 es->reverse = blk_sizes[i].reverse;
4197
4198 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
4199 xlt1->count = blk_sizes[i].xlt1;
4200
4201 xlt1->ptypes = (struct ice_ptg_ptype *)
4202 ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
4203
4204 if (!xlt1->ptypes)
4205 goto err;
4206
4207 xlt1->ptg_tbl = (struct ice_ptg_entry *)
4208 ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
4209
4210 if (!xlt1->ptg_tbl)
4211 goto err;
4212
4213 xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
4214 if (!xlt1->t)
4215 goto err;
4216
4217 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
4218 xlt2->count = blk_sizes[i].xlt2;
4219
4220 xlt2->vsis = (struct ice_vsig_vsi *)
4221 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
4222
4223 if (!xlt2->vsis)
4224 goto err;
4225
4226 xlt2->vsig_tbl = (struct ice_vsig_entry *)
4227 ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
4228 if (!xlt2->vsig_tbl)
4229 goto err;
4230
4231 for (j = 0; j < xlt2->count; j++)
4232 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
4233
4234 xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
4235 if (!xlt2->t)
4236 goto err;
4237
4238 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
4239 prof->count = blk_sizes[i].prof_tcam;
4240 prof->max_prof_id = blk_sizes[i].prof_id;
4241 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
4242 prof->t = (struct ice_prof_tcam_entry *)
4243 ice_calloc(hw, prof->count, sizeof(*prof->t));
4244
4245 if (!prof->t)
4246 goto err;
4247
4248 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
4249 prof_redir->count = blk_sizes[i].prof_redir;
4250 prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
4251 sizeof(*prof_redir->t));
4252
4253 if (!prof_redir->t)
4254 goto err;
4255
4256 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
4257 es->count = blk_sizes[i].es;
4258 es->fvw = blk_sizes[i].fvw;
4259 es->t = (struct ice_fv_word *)
4260 ice_calloc(hw, (u32)(es->count * es->fvw),
4261 sizeof(*es->t));
4262 if (!es->t)
4263 goto err;
4264
4265 es->ref_count = (u16 *)
4266 ice_calloc(hw, es->count, sizeof(*es->ref_count));
4267
4268 if (!es->ref_count)
4269 goto err;
4270
4271 es->written = (u8 *)
4272 ice_calloc(hw, es->count, sizeof(*es->written));
4273
4274 if (!es->written)
4275 goto err;
4276
4277 es->mask_ena = (u32 *)
4278 ice_calloc(hw, es->count, sizeof(*es->mask_ena));
4279
4280 if (!es->mask_ena)
4281 goto err;
4282 }
4283 return ICE_SUCCESS;
4284
4285 err:
4286 ice_free_hw_tbls(hw);
4287 return ICE_ERR_NO_MEMORY;
4288 }
4289
4290 /**
4291 * ice_prof_gen_key - generate profile ID key
4292 * @hw: pointer to the HW struct
4293 * @blk: the block in which to write profile ID to
4294 * @ptg: packet type group (PTG) portion of key
4295 * @vsig: VSIG portion of key
4296 * @cdid: CDID portion of key
4297 * @flags: flag portion of key
4298 * @vl_msk: valid mask
4299 * @dc_msk: don't care mask
4300 * @nm_msk: never match mask
4301 * @key: output of profile ID key
4302 */
4303 static enum ice_status
4304 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
4305 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4306 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
4307 u8 key[ICE_TCAM_KEY_SZ])
4308 {
4309 struct ice_prof_id_key inkey;
4310
4311 inkey.xlt1 = ptg;
4312 inkey.xlt2_cdid = CPU_TO_LE16(vsig);
4313 inkey.flags = CPU_TO_LE16(flags);
4314
4315 switch (hw->blk[blk].prof.cdid_bits) {
4316 case 0:
4317 break;
4318 case 2:
4319 #define ICE_CD_2_M 0xC000U
4320 #define ICE_CD_2_S 14
4321 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
4322 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
4323 break;
4324 case 4:
4325 #define ICE_CD_4_M 0xF000U
4326 #define ICE_CD_4_S 12
4327 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
4328 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
4329 break;
4330 case 8:
4331 #define ICE_CD_8_M 0xFF00U
4332 #define ICE_CD_8_S 8
4333 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
4334 inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
4335 break;
4336 default:
4337 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
4338 break;
4339 }
4340
4341 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
4342 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
4343 }
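
/* Worked example of the CDID packing above for cdid_bits == 2 with
 * cdid == 1: BIT(1) << ICE_CD_2_S == 0x8000, which lands in the two
 * ICE_CD_2_M bits at the top of the xlt2_cdid key field. Host byte
 * order is used here for simplicity; the real code operates on LE16
 * values.
 */
#if 0
static u16 ice_example_pack_cdid2(u16 xlt2_cdid, u8 cdid)
{
	xlt2_cdid &= ~ICE_CD_2_M;
	xlt2_cdid |= (u16)(BIT(cdid) << ICE_CD_2_S);
	return xlt2_cdid;	/* 0x8000 for cdid == 1 */
}
#endif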
4344
4345 /**
4346 * ice_tcam_write_entry - write TCAM entry
4347 * @hw: pointer to the HW struct
4348 * @blk: the block in which to write profile ID to
4349 * @idx: the entry index to write to
4350 * @prof_id: profile ID
4351 * @ptg: packet type group (PTG) portion of key
4352 * @vsig: VSIG portion of key
4353 * @cdid: CDID portion of key
4354 * @flags: flag portion of key
4355 * @vl_msk: valid mask
4356 * @dc_msk: don't care mask
4357 * @nm_msk: never match mask
4358 */
4359 static enum ice_status
4360 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
4361 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
4362 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4363 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
4364 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
4365 {
4366 struct ice_prof_tcam_entry;
4367 enum ice_status status;
4368
4369 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
4370 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
4371 if (!status) {
4372 hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
4373 hw->blk[blk].prof.t[idx].prof_id = prof_id;
4374 }
4375
4376 return status;
4377 }
4378
4379 /**
4380 * ice_vsig_get_ref - returns the number of VSIs belonging to a VSIG
4381 * @hw: pointer to the hardware structure
4382 * @blk: HW block
4383 * @vsig: VSIG to query
4384 * @refs: pointer to variable to receive the reference count
4385 */
4386 static enum ice_status
4387 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
4388 {
4389 u16 idx = vsig & ICE_VSIG_IDX_M;
4390 struct ice_vsig_vsi *ptr;
4391
4392 *refs = 0;
4393
4394 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
4395 return ICE_ERR_DOES_NOT_EXIST;
4396
4397 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4398 while (ptr) {
4399 (*refs)++;
4400 ptr = ptr->next_vsi;
4401 }
4402
4403 return ICE_SUCCESS;
4404 }
4405
4406 /**
4407 * ice_has_prof_vsig - check to see if VSIG has a specific profile
4408 * @hw: pointer to the hardware structure
4409 * @blk: HW block
4410 * @vsig: VSIG to check against
4411 * @hdl: profile handle
4412 */
4413 static bool
4414 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
4415 {
4416 u16 idx = vsig & ICE_VSIG_IDX_M;
4417 struct ice_vsig_prof *ent;
4418
4419 LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4420 ice_vsig_prof, list)
4421 if (ent->profile_cookie == hdl)
4422 return true;
4423
4424 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
4425 vsig);
4426 return false;
4427 }
4428
4429 /**
4430 * ice_prof_bld_es - build profile ID extraction sequence changes
4431 * @hw: pointer to the HW struct
4432 * @blk: hardware block
4433 * @bld: the update package buffer build to add to
4434 * @chgs: the list of changes to make in hardware
4435 */
4436 static enum ice_status
4437 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
4438 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4439 {
4440 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
4441 struct ice_chs_chg *tmp;
4442
4443 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4444 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
4445 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
4446 struct ice_pkg_es *p;
4447 u32 id;
4448
4449 id = ice_sect_id(blk, ICE_VEC_TBL);
4450 p = (struct ice_pkg_es *)
4451 ice_pkg_buf_alloc_section(bld, id,
4452 ice_struct_size(p, es,
4453 1) +
4454 vec_size -
4455 sizeof(p->es[0]));
4456
4457 if (!p)
4458 return ICE_ERR_MAX_LIMIT;
4459
4460 p->count = CPU_TO_LE16(1);
4461 p->offset = CPU_TO_LE16(tmp->prof_id);
4462
4463 ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
4464 ICE_NONDMA_TO_NONDMA);
4465 }
4466
4467 return ICE_SUCCESS;
4468 }
4469
4470 /**
4471 * ice_prof_bld_tcam - build profile ID TCAM changes
4472 * @hw: pointer to the HW struct
4473 * @blk: hardware block
4474 * @bld: the update package buffer build to add to
4475 * @chgs: the list of changes to make in hardware
4476 */
4477 static enum ice_status
4478 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4479 struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
4480 {
4481 struct ice_chs_chg *tmp;
4482
4483 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4484 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4485 struct ice_prof_id_section *p;
4486 u32 id;
4487
4488 id = ice_sect_id(blk, ICE_PROF_TCAM);
4489 p = (struct ice_prof_id_section *)
4490 ice_pkg_buf_alloc_section(bld, id,
4491 ice_struct_size(p,
4492 entry,
4493 1));
4494
4495 if (!p)
4496 return ICE_ERR_MAX_LIMIT;
4497
4498 p->count = CPU_TO_LE16(1);
4499 p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
4500 p->entry[0].prof_id = tmp->prof_id;
4501
4502 ice_memcpy(p->entry[0].key,
4503 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4504 sizeof(hw->blk[blk].prof.t->key),
4505 ICE_NONDMA_TO_NONDMA);
4506 }
4507
4508 return ICE_SUCCESS;
4509 }
4510
4511 /**
4512 * ice_prof_bld_xlt1 - build XLT1 changes
4513 * @blk: hardware block
4514 * @bld: the update package buffer build to add to
4515 * @chgs: the list of changes to make in hardware
4516 */
4517 static enum ice_status
4518 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4519 struct LIST_HEAD_TYPE *chgs)
4520 {
4521 struct ice_chs_chg *tmp;
4522
4523 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
4524 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4525 struct ice_xlt1_section *p;
4526 u32 id;
4527
4528 id = ice_sect_id(blk, ICE_XLT1);
4529 p = (struct ice_xlt1_section *)
4530 ice_pkg_buf_alloc_section(bld, id,
4531 ice_struct_size(p,
4532 value,
4533 1));
4534
4535 if (!p)
4536 return ICE_ERR_MAX_LIMIT;
4537
4538 p->count = CPU_TO_LE16(1);
4539 p->offset = CPU_TO_LE16(tmp->ptype);
4540 p->value[0] = tmp->ptg;
4541 }
4542
4543 return ICE_SUCCESS;
4544 }
4545
4546 /**
4547 * ice_prof_bld_xlt2 - build XLT2 changes
4548 * @blk: hardware block
4549 * @bld: the update package buffer build to add to
4550 * @chgs: the list of changes to make in hardware
4551 */
4552 static enum ice_status
4553 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4554 struct LIST_HEAD_TYPE *chgs)
4555 {
4556 struct ice_chs_chg *tmp;
4557
4558 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4559 struct ice_xlt2_section *p;
4560 u32 id;
4561
4562 switch (tmp->type) {
4563 case ICE_VSIG_ADD:
4564 case ICE_VSI_MOVE:
4565 case ICE_VSIG_REM:
4566 id = ice_sect_id(blk, ICE_XLT2);
4567 p = (struct ice_xlt2_section *)
4568 ice_pkg_buf_alloc_section(bld, id,
4569 ice_struct_size(p,
4570 value,
4571 1));
4572
4573 if (!p)
4574 return ICE_ERR_MAX_LIMIT;
4575
4576 p->count = CPU_TO_LE16(1);
4577 p->offset = CPU_TO_LE16(tmp->vsi);
4578 p->value[0] = CPU_TO_LE16(tmp->vsig);
4579 break;
4580 default:
4581 break;
4582 }
4583 }
4584
4585 return ICE_SUCCESS;
4586 }
4587
4588 /**
4589 * ice_upd_prof_hw - update hardware using the change list
4590 * @hw: pointer to the HW struct
4591 * @blk: hardware block
4592 * @chgs: the list of changes to make in hardware
4593 */
4594 static enum ice_status
4595 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4596 struct LIST_HEAD_TYPE *chgs)
4597 {
4598 struct ice_buf_build *b;
4599 struct ice_chs_chg *tmp;
4600 enum ice_status status;
4601 u16 pkg_sects;
4602 u16 xlt1 = 0;
4603 u16 xlt2 = 0;
4604 u16 tcam = 0;
4605 u16 es = 0;
4606 u16 sects;
4607
4608 /* count number of sections we need */
4609 LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
4610 switch (tmp->type) {
4611 case ICE_PTG_ES_ADD:
4612 if (tmp->add_ptg)
4613 xlt1++;
4614 if (tmp->add_prof)
4615 es++;
4616 break;
4617 case ICE_TCAM_ADD:
4618 tcam++;
4619 break;
4620 case ICE_VSIG_ADD:
4621 case ICE_VSI_MOVE:
4622 case ICE_VSIG_REM:
4623 xlt2++;
4624 break;
4625 default:
4626 break;
4627 }
4628 }
4629 sects = xlt1 + xlt2 + tcam + es;
4630
4631 if (!sects)
4632 return ICE_SUCCESS;
4633
4634 /* Build update package buffer */
4635 b = ice_pkg_buf_alloc(hw);
4636 if (!b)
4637 return ICE_ERR_NO_MEMORY;
4638
4639 status = ice_pkg_buf_reserve_section(b, sects);
4640 if (status)
4641 goto error_tmp;
4642
4643 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
4644 if (es) {
4645 status = ice_prof_bld_es(hw, blk, b, chgs);
4646 if (status)
4647 goto error_tmp;
4648 }
4649
4650 if (tcam) {
4651 status = ice_prof_bld_tcam(hw, blk, b, chgs);
4652 if (status)
4653 goto error_tmp;
4654 }
4655
4656 if (xlt1) {
4657 status = ice_prof_bld_xlt1(blk, b, chgs);
4658 if (status)
4659 goto error_tmp;
4660 }
4661
4662 if (xlt2) {
4663 status = ice_prof_bld_xlt2(blk, b, chgs);
4664 if (status)
4665 goto error_tmp;
4666 }
4667
4668 /* After the package buffer build, check that the section count in the
4669 * buffer is non-zero and matches the number of sections detected for
4670 * the package update.
4671 */
4672 pkg_sects = ice_pkg_buf_get_active_sections(b);
4673 if (!pkg_sects || pkg_sects != sects) {
4674 status = ICE_ERR_INVAL_SIZE;
4675 goto error_tmp;
4676 }
4677
4678 /* update package */
4679 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4680 if (status == ICE_ERR_AQ_ERROR)
4681 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4682
4683 error_tmp:
4684 ice_pkg_buf_free(hw, b);
4685 return status;
4686 }
4687
4688 /**
4689 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
4690 * @hw: pointer to the HW struct
4691 * @prof_id: profile ID
4692 * @mask_sel: mask select
4693 *
4694 * This function enables any of the masks selected by the mask select
4695 * parameter for the specified profile.
4696 */
4697 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4698 {
4699 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4700
4701 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4702 GLQF_FDMASK_SEL(prof_id), mask_sel);
4703 }
4704
4705 struct ice_fd_src_dst_pair {
4706 u8 prot_id;
4707 u8 count;
4708 u16 off;
4709 };
4710
4711 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4712 /* These are defined in pairs */
4713 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4714 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4715
4716 { ICE_PROT_IPV4_IL, 2, 12 },
4717 { ICE_PROT_IPV4_IL, 2, 16 },
4718
4719 { ICE_PROT_IPV4_IL_IL, 2, 12 },
4720 { ICE_PROT_IPV4_IL_IL, 2, 16 },
4721
4722 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4723 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4724
4725 { ICE_PROT_IPV6_IL, 8, 8 },
4726 { ICE_PROT_IPV6_IL, 8, 24 },
4727
4728 { ICE_PROT_IPV6_IL_IL, 8, 8 },
4729 { ICE_PROT_IPV6_IL_IL, 8, 24 },
4730
4731 { ICE_PROT_TCP_IL, 1, 0 },
4732 { ICE_PROT_TCP_IL, 1, 2 },
4733
4734 { ICE_PROT_UDP_OF, 1, 0 },
4735 { ICE_PROT_UDP_OF, 1, 2 },
4736
4737 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
4738 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
4739
4740 { ICE_PROT_SCTP_IL, 1, 0 },
4741 { ICE_PROT_SCTP_IL, 1, 2 }
4742 };
4743
4744 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
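
/* The rows above are laid out with src and dst adjacent, so entry j's
 * partner is j ^ 1 (equivalently j + ((j % 2) ? -1 : 1), the form used
 * in ice_update_fd_swap below). A sketch of that pairing rule:
 */
#if 0
static const struct ice_fd_src_dst_pair *ice_example_fd_partner(u8 j)
{
	/* src <-> dst entry of the same field */
	return &ice_fd_pairs[j ^ 1];
}
#endif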
4745
4746 /**
4747 * ice_update_fd_swap - set register appropriately for a FD FV extraction
4748 * @hw: pointer to the HW struct
4749 * @prof_id: profile ID
4750 * @es: extraction sequence (length of array is determined by the block)
4751 */
4752 static enum ice_status
4753 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4754 {
4755 ice_declare_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4756 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4757 #define ICE_FD_FV_NOT_FOUND (-2)
4758 s8 first_free = ICE_FD_FV_NOT_FOUND;
4759 u8 used[ICE_MAX_FV_WORDS] = { 0 };
4760 s8 orig_free, si;
4761 u32 mask_sel = 0;
4762 u8 i, j, k;
4763
4764 ice_zero_bitmap(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4765
4766 /* This code assumes that the Flow Director field vectors are assigned
4767 * from the end of the FV indexes working towards the zero index, that
4768 * only complete fields will be included and will be consecutive, and
4769 * that there are no gaps between valid indexes.
4770 */
4771
4772 /* Determine swap fields present */
4773 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4774 /* Find the first free entry, assuming right to left population.
4775 * This is where we can start adding additional pairs if needed.
4776 */
4777 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4778 ICE_PROT_INVALID)
4779 first_free = i - 1;
4780
4781 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4782 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4783 es[i].off == ice_fd_pairs[j].off) {
4784 ice_set_bit(j, pair_list);
4785 pair_start[j] = i;
4786 }
4787 }
4788
4789 orig_free = first_free;
4790
4791 /* determine missing swap fields that need to be added */
4792 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4793 u8 bit1 = ice_is_bit_set(pair_list, i + 1);
4794 u8 bit0 = ice_is_bit_set(pair_list, i);
4795
4796 if (bit0 ^ bit1) {
4797 u8 index;
4798
4799 /* add the appropriate 'paired' entry */
4800 if (!bit0)
4801 index = i;
4802 else
4803 index = i + 1;
4804
4805 /* check for room */
4806 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4807 return ICE_ERR_MAX_LIMIT;
4808
4809 /* place in extraction sequence */
4810 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4811 es[first_free - k].prot_id =
4812 ice_fd_pairs[index].prot_id;
4813 es[first_free - k].off =
4814 ice_fd_pairs[index].off + (k * 2);
4815
4816 if (k > first_free)
4817 return ICE_ERR_OUT_OF_RANGE;
4818
4819 /* keep track of non-relevant fields */
4820 mask_sel |= BIT(first_free - k);
4821 }
4822
4823 pair_start[index] = first_free;
4824 first_free -= ice_fd_pairs[index].count;
4825 }
4826 }
4827
4828 /* fill in the swap array */
4829 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4830 while (si >= 0) {
4831 u8 indexes_used = 1;
4832
4833 /* assume flat at this index */
4834 #define ICE_SWAP_VALID 0x80
4835 used[si] = si | ICE_SWAP_VALID;
4836
4837 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4838 si -= indexes_used;
4839 continue;
4840 }
4841
4842 /* check for a swap location */
4843 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4844 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4845 es[si].off == ice_fd_pairs[j].off) {
4846 u8 idx;
4847
4848 /* determine the appropriate matching field */
4849 idx = j + ((j % 2) ? -1 : 1);
4850
4851 indexes_used = ice_fd_pairs[idx].count;
4852 for (k = 0; k < indexes_used; k++) {
4853 used[si - k] = (pair_start[idx] - k) |
4854 ICE_SWAP_VALID;
4855 }
4856
4857 break;
4858 }
4859
4860 si -= indexes_used;
4861 }
4862
4863 /* for each set of 4 swap and 4 inset indexes, write the appropriate
4864 * register
4865 */
4866 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4867 u32 raw_swap = 0;
4868 u32 raw_in = 0;
4869
4870 for (k = 0; k < 4; k++) {
4871 u8 idx;
4872
4873 idx = (j * 4) + k;
4874 if (used[idx] && !(mask_sel & BIT(idx))) {
4875 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4876 #define ICE_INSET_DFLT 0x9f
4877 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4878 }
4879 }
4880
4881 /* write the appropriate swap register set */
4882 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4883
4884 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4885 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4886
4887 /* write the appropriate inset register set */
4888 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4889
4890 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4891 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4892 }
4893
4894 /* initially clear the mask select for this profile */
4895 ice_update_fd_mask(hw, prof_id, 0);
4896
4897 return ICE_SUCCESS;
4898 }
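
/* Each GLQF_FDSWAP register packs four one-byte swap entries: byte k of
 * register j describes FV index j * 4 + k, and a valid entry carries
 * ICE_SWAP_VALID (0x80) plus the index to swap with. Sketch of the
 * per-register packing, omitting the mask_sel filtering done above:
 */
#if 0
static u32 ice_example_pack_swap(const u8 used[ICE_MAX_FV_WORDS], u8 j)
{
	u32 raw = 0;
	u8 k;

	for (k = 0; k < 4; k++)
		raw |= (u32)used[(j * 4) + k] << (k * BITS_PER_BYTE);
	return raw;
}
#endif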
4899
4900 /* The entries here need to match the order of enum ice_ptype_attrib */
4901 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4902 { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
4903 { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
4904 { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
4905 { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
4906 };
4907
4908 /**
4909 * ice_get_ptype_attrib_info - get ptype attribute information
4910 * @type: attribute type
4911 * @info: pointer to variable to receive the attribute information
4912 */
4913 static void
4914 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4915 struct ice_ptype_attrib_info *info)
4916 {
4917 *info = ice_ptype_attributes[type];
4918 }
4919
4920 /**
4921 * ice_add_prof_attrib - add any PTG with attributes to profile
4922 * @prof: pointer to the profile to which PTG entries will be added
4923 * @ptg: PTG to be added
4924 * @ptype: PTYPE that needs to be looked up
4925 * @attr: array of attributes that will be considered
4926 * @attr_cnt: number of elements in the attribute array
4927 */
4928 static enum ice_status
4929 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4930 const struct ice_ptype_attributes *attr, u16 attr_cnt)
4931 {
4932 bool found = false;
4933 u16 i;
4934
4935 for (i = 0; i < attr_cnt; i++) {
4936 if (attr[i].ptype == ptype) {
4937 found = true;
4938
4939 prof->ptg[prof->ptg_cnt] = ptg;
4940 ice_get_ptype_attrib_info(attr[i].attrib,
4941 &prof->attr[prof->ptg_cnt]);
4942
4943 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4944 return ICE_ERR_MAX_LIMIT;
4945 }
4946 }
4947
4948 if (!found)
4949 return ICE_ERR_DOES_NOT_EXIST;
4950
4951 return ICE_SUCCESS;
4952 }
4953
4954 /**
4955 * ice_disable_fd_swap - set register appropriately to disable FD swap
4956 * @hw: pointer to the HW struct
4957 * @prof_id: profile ID
4958 */
4959 void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id)
4960 {
4961 u8 swap_val = ICE_SWAP_VALID;
4962 u8 i;
4963 /* Since the SWAP flag in the programming descriptor doesn't work,
4964 * disable the SWAP option by programming the SWAP and INSET
4965 * register sets accordingly.
4966 */
4967 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw / 4; i++) {
4968 u32 raw_swap = 0;
4969 u32 raw_in = 0;
4970 u8 j;
4971
4972 for (j = 0; j < 4; j++) {
4973 raw_swap |= (swap_val++) << (j * BITS_PER_BYTE);
4974 raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE);
4975 }
4976
4977 /* write the FDIR swap register set */
4978 wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap);
4979
4980 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4981 prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap);
4982
4983 /* write the FDIR inset register set */
4984 wr32(hw, GLQF_FDINSET(prof_id, i), raw_in);
4985
4986 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4987 prof_id, i, GLQF_FDINSET(prof_id, i), raw_in);
4988 }
4989 }
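
/* Disabling the swap option above programs an identity mapping: FV
 * index i "swaps" with itself, so each byte is simply the index with
 * the valid flag set. One such byte value, as a sketch:
 */
#if 0
static u8 ice_example_identity_swap(u8 fv_idx)
{
	return fv_idx | ICE_SWAP_VALID;	/* e.g. 0x83 for index 3 */
}
#endif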
4990
4991 /**
4992 * ice_add_prof - add profile
4993 * @hw: pointer to the HW struct
4994 * @blk: hardware block
4995 * @id: profile tracking ID
4996 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
4997 * @attr: array of attributes
4998 * @attr_cnt: number of elements in attrib array
4999 * @es: extraction sequence (length of array is determined by the block)
5000 * @masks: mask for extraction sequence
5001 * @fd_swap: enable/disable FDIR paired src/dst fields swap option
5002 *
5003 * This function registers a profile, which matches a set of PTYPES with a
5004 * particular extraction sequence. Although the hardware profile is
5005 * allocated here, it will not be written until the first call to
5006 * ice_add_flow that specifies the ID value used here.
5007 */
5008 enum ice_status
5009 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
5010 const struct ice_ptype_attributes *attr, u16 attr_cnt,
5011 struct ice_fv_word *es, u16 *masks, bool fd_swap)
5012 {
5013 u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
5014 ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
5015 struct ice_prof_map *prof;
5016 enum ice_status status;
5017 u8 byte = 0;
5018 u8 prof_id;
5019
5020 ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
5021
5022 ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
5023
5024 /* search for existing profile */
5025 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
5026 if (status) {
5027 /* allocate profile ID */
5028 status = ice_alloc_prof_id(hw, blk, &prof_id);
5029 if (status)
5030 goto err_ice_add_prof;
5031 if (blk == ICE_BLK_FD && fd_swap) {
5032 /* For Flow Director block, the extraction sequence may
5033 * need to be altered in the case where there are paired
5034 * fields that have no match. This is necessary because
5035 * for Flow Director, src and dest fields need to be paired
5036 * for filter programming and these values are swapped
5037 * during Tx.
5038 */
5039 status = ice_update_fd_swap(hw, prof_id, es);
5040 if (status)
5041 goto err_ice_add_prof;
5042 } else if (blk == ICE_BLK_FD) {
5043 ice_disable_fd_swap(hw, prof_id);
5044 }
5045 status = ice_update_prof_masking(hw, blk, prof_id, masks);
5046 if (status)
5047 goto err_ice_add_prof;
5048
5049 /* and write new es */
5050 ice_write_es(hw, blk, prof_id, es);
5051 }
5052
5053 ice_prof_inc_ref(hw, blk, prof_id);
5054
5055 /* add profile info */
5056
5057 prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
5058 	if (!prof) {
5059 		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof;
	}
5060
5061 prof->profile_cookie = id;
5062 prof->prof_id = prof_id;
5063 prof->ptg_cnt = 0;
5064 prof->context = 0;
5065
5066 /* build list of ptgs */
5067 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
5068 u8 bit;
5069
5070 if (!ptypes[byte]) {
5071 bytes--;
5072 byte++;
5073 continue;
5074 }
5075
5076 /* Examine 8 bits per byte */
5077 ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
5078 BITS_PER_BYTE) {
5079 u16 ptype;
5080 u8 ptg;
5081
5082 ptype = byte * BITS_PER_BYTE + bit;
5083
5084 /* The package should place all ptypes in a non-zero
5085 * PTG, so the following call should never fail.
5086 */
5087 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
5088 continue;
5089
5090 /* If PTG is already added, skip and continue */
5091 if (ice_is_bit_set(ptgs_used, ptg))
5092 continue;
5093
5094 ice_set_bit(ptg, ptgs_used);
5095 /* Check to see there are any attributes for this
5096 * ptype, and add them if found.
5097 */
5098 status = ice_add_prof_attrib(prof, ptg, ptype, attr,
5099 attr_cnt);
5100 if (status == ICE_ERR_MAX_LIMIT)
5101 break;
5102 if (status) {
5103 /* This is simply a ptype/PTG with no
5104 * attribute
5105 */
5106 prof->ptg[prof->ptg_cnt] = ptg;
5107 prof->attr[prof->ptg_cnt].flags = 0;
5108 prof->attr[prof->ptg_cnt].mask = 0;
5109
5110 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
5111 break;
5112 }
5113 }
5114
5115 bytes--;
5116 byte++;
5117 }
5118
5119 LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
5120 status = ICE_SUCCESS;
5121
5122 err_ice_add_prof:
5123 ice_release_lock(&hw->blk[blk].es.prof_map_lock);
5124 return status;
5125 }
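
/* Hypothetical caller sketch for ice_add_prof(): set one bit per ptype
 * of interest (ptype 16 below is arbitrary), then register the profile
 * under a caller-chosen tracking ID (0xAB01 here is made up). The NULL
 * attr array simply means no ptype attributes.
 */
#if 0
static enum ice_status
ice_example_add_prof(struct ice_hw *hw, struct ice_fv_word *es, u16 *masks)
{
	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };

	ptypes[16 / BITS_PER_BYTE] |= BIT(16 % BITS_PER_BYTE);
	return ice_add_prof(hw, ICE_BLK_RSS, 0xAB01ULL, ptypes,
			    NULL, 0, es, masks, true);
}
#endif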
5126
5127 /**
5128 * ice_search_prof_id - Search for a profile tracking ID
5129 * @hw: pointer to the HW struct
5130 * @blk: hardware block
5131 * @id: profile tracking ID
5132 *
5133 * This will search for a profile tracking ID which was previously added.
5134 * The profile map lock should be held before calling this function.
5135 */
5136 struct ice_prof_map *
5137 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
5138 {
5139 struct ice_prof_map *entry = NULL;
5140 struct ice_prof_map *map;
5141
5142 LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
5143 if (map->profile_cookie == id) {
5144 entry = map;
5145 break;
5146 }
5147
5148 return entry;
5149 }
5150
5151 /**
5152 * ice_vsig_prof_id_count - count profiles in a VSIG
5153 * @hw: pointer to the HW struct
5154 * @blk: hardware block
5155 * @vsig: VSIG in which to count the profiles
5156 */
5157 static u16
5158 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
5159 {
5160 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
5161 struct ice_vsig_prof *p;
5162
5163 LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5164 ice_vsig_prof, list)
5165 count++;
5166
5167 return count;
5168 }
5169
5170 /**
5171 * ice_rel_tcam_idx - release a TCAM index
5172 * @hw: pointer to the HW struct
5173 * @blk: hardware block
5174 * @idx: the index to release
5175 */
5176 static enum ice_status
5177 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
5178 {
5179 /* Masks to invoke a never match entry */
5180 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5181 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
5182 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
5183 enum ice_status status;
5184
5185 /* write the TCAM entry */
5186 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
5187 dc_msk, nm_msk);
5188 if (status)
5189 return status;
5190
5191 /* release the TCAM entry */
5192 status = ice_free_tcam_ent(hw, blk, idx);
5193
5194 return status;
5195 }
5196
5197 /**
5198 * ice_rem_prof_id - remove one profile from a VSIG
5199 * @hw: pointer to the HW struct
5200 * @blk: hardware block
5201 * @prof: pointer to profile structure to remove
5202 */
5203 static enum ice_status
5204 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
5205 struct ice_vsig_prof *prof)
5206 {
5207 enum ice_status status;
5208 u16 i;
5209
5210 for (i = 0; i < prof->tcam_count; i++)
5211 if (prof->tcam[i].in_use) {
5212 prof->tcam[i].in_use = false;
5213 status = ice_rel_tcam_idx(hw, blk,
5214 prof->tcam[i].tcam_idx);
5215 if (status)
5216 return ICE_ERR_HW_TABLE;
5217 }
5218
5219 return ICE_SUCCESS;
5220 }
5221
5222 /**
5223 * ice_rem_vsig - remove VSIG
5224 * @hw: pointer to the HW struct
5225 * @blk: hardware block
5226 * @vsig: the VSIG to remove
5227 * @chg: the change list
5228 */
5229 static enum ice_status
5230 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5231 struct LIST_HEAD_TYPE *chg)
5232 {
5233 u16 idx = vsig & ICE_VSIG_IDX_M;
5234 struct ice_vsig_vsi *vsi_cur;
5235 struct ice_vsig_prof *d, *t;
5236 enum ice_status status;
5237
5238 /* remove TCAM entries */
5239 LIST_FOR_EACH_ENTRY_SAFE(d, t,
5240 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5241 ice_vsig_prof, list) {
5242 status = ice_rem_prof_id(hw, blk, d);
5243 if (status)
5244 return status;
5245
5246 LIST_DEL(&d->list);
5247 ice_free(hw, d);
5248 }
5249
5250 /* Move all VSIs associated with this VSIG to the default VSIG */
5251 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
5252 /* If the VSIG has at least 1 VSI then iterate through the list
5253 * and remove the VSIs before deleting the group.
5254 */
5255 if (vsi_cur)
5256 do {
5257 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
5258 struct ice_chs_chg *p;
5259
5260 p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
5261 if (!p)
5262 return ICE_ERR_NO_MEMORY;
5263
5264 p->type = ICE_VSIG_REM;
5265 p->orig_vsig = vsig;
5266 p->vsig = ICE_DEFAULT_VSIG;
5267 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
5268
5269 LIST_ADD(&p->list_entry, chg);
5270
5271 vsi_cur = tmp;
5272 } while (vsi_cur);
5273
5274 return ice_vsig_free(hw, blk, vsig);
5275 }
5276
5277 /**
5278 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
5279 * @hw: pointer to the HW struct
5280 * @blk: hardware block
5281 * @vsig: VSIG to remove the profile from
5282 * @hdl: profile handle indicating which profile to remove
5283 * @chg: list to receive a record of changes
5284 */
5285 static enum ice_status
5286 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5287 struct LIST_HEAD_TYPE *chg)
5288 {
5289 u16 idx = vsig & ICE_VSIG_IDX_M;
5290 struct ice_vsig_prof *p, *t;
5291 enum ice_status status;
5292
5293 LIST_FOR_EACH_ENTRY_SAFE(p, t,
5294 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5295 ice_vsig_prof, list)
5296 if (p->profile_cookie == hdl) {
5297 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
5298 /* this is the last profile, remove the VSIG */
5299 return ice_rem_vsig(hw, blk, vsig, chg);
5300
5301 status = ice_rem_prof_id(hw, blk, p);
5302 if (!status) {
5303 LIST_DEL(&p->list);
5304 ice_free(hw, p);
5305 }
5306 return status;
5307 }
5308
5309 return ICE_ERR_DOES_NOT_EXIST;
5310 }
5311
5312 /**
5313 * ice_rem_flow_all - remove all flows with a particular profile
5314 * @hw: pointer to the HW struct
5315 * @blk: hardware block
5316 * @id: profile tracking ID
5317 */
5318 static enum ice_status
5319 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
5320 {
5321 struct ice_chs_chg *del, *tmp;
5322 struct LIST_HEAD_TYPE chg;
5323 enum ice_status status;
5324 u16 i;
5325
5326 INIT_LIST_HEAD(&chg);
5327
5328 for (i = 1; i < ICE_MAX_VSIGS; i++)
5329 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
5330 if (ice_has_prof_vsig(hw, blk, i, id)) {
5331 status = ice_rem_prof_id_vsig(hw, blk, i, id,
5332 &chg);
5333 if (status)
5334 goto err_ice_rem_flow_all;
5335 }
5336 }
5337
5338 status = ice_upd_prof_hw(hw, blk, &chg);
5339
5340 err_ice_rem_flow_all:
5341 LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
5342 LIST_DEL(&del->list_entry);
5343 ice_free(hw, del);
5344 }
5345
5346 return status;
5347 }

/**
 * ice_rem_prof - remove profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will remove the profile specified by the ID parameter, which was
 * previously created through ice_add_prof. If any existing entries
 * are associated with this profile, they will be removed as well.
 */
enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
        struct ice_prof_map *pmap;
        enum ice_status status;

        ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);

        pmap = ice_search_prof_id(hw, blk, id);
        if (!pmap) {
                status = ICE_ERR_DOES_NOT_EXIST;
                goto err_ice_rem_prof;
        }

        /* remove all flows with this profile */
        status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
        if (status)
                goto err_ice_rem_prof;

        /* dereference profile, and possibly remove */
        ice_prof_dec_ref(hw, blk, pmap->prof_id);

        LIST_DEL(&pmap->list);
        ice_free(hw, pmap);

err_ice_rem_prof:
        ice_release_lock(&hw->blk[blk].es.prof_map_lock);
        return status;
}
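
/* Usage sketch (illustrative only; "id" is assumed to be the tracking ID
 * that was passed to ice_add_prof earlier):
 *
 *      status = ice_rem_prof(hw, ICE_BLK_FD, id);
 *      if (status == ICE_ERR_DOES_NOT_EXIST)
 *              ... the profile was never added, or was already removed ...
 */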

/**
 * ice_get_prof - get profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: profile handle
 * @chg: change list
 */
static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
             struct LIST_HEAD_TYPE *chg)
{
        enum ice_status status = ICE_SUCCESS;
        struct ice_prof_map *map;
        struct ice_chs_chg *p;
        u16 i;

        ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
        /* Get the details on the profile specified by the handle ID */
        map = ice_search_prof_id(hw, blk, hdl);
        if (!map) {
                status = ICE_ERR_DOES_NOT_EXIST;
                goto err_ice_get_prof;
        }

        for (i = 0; i < map->ptg_cnt; i++)
                if (!hw->blk[blk].es.written[map->prof_id]) {
                        /* add ES to change list */
                        p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
                        if (!p) {
                                status = ICE_ERR_NO_MEMORY;
                                goto err_ice_get_prof;
                        }

                        p->type = ICE_PTG_ES_ADD;
                        p->ptype = 0;
                        p->ptg = map->ptg[i];
                        p->attr = map->attr[i];
                        p->add_ptg = 0;

                        p->add_prof = 1;
                        p->prof_id = map->prof_id;

                        hw->blk[blk].es.written[map->prof_id] = true;

                        LIST_ADD(&p->list_entry, chg);
                }

err_ice_get_prof:
        ice_release_lock(&hw->blk[blk].es.prof_map_lock);
        /* let caller clean up the change list */
        return status;
}

/**
 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG from which to copy the list
 * @lst: output list
 *
 * This routine makes a copy of the list of profiles in the specified VSIG.
 */
static enum ice_status
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
                   struct LIST_HEAD_TYPE *lst)
{
        struct ice_vsig_prof *ent1, *ent2;
        u16 idx = vsig & ICE_VSIG_IDX_M;

        LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
                            ice_vsig_prof, list) {
                struct ice_vsig_prof *p;

                /* copy to the input list */
                p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
                                                       ICE_NONDMA_TO_NONDMA);
                if (!p)
                        goto err_ice_get_profs_vsig;

                LIST_ADD_TAIL(&p->list, lst);
        }

        return ICE_SUCCESS;

err_ice_get_profs_vsig:
        LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
                LIST_DEL(&ent1->list);
                ice_free(hw, ent1);
        }

        return ICE_ERR_NO_MEMORY;
}

/**
 * ice_add_prof_to_lst - add profile entry to a list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @lst: the list to be added to
 * @hdl: profile handle of entry to add
 */
static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
                    struct LIST_HEAD_TYPE *lst, u64 hdl)
{
        enum ice_status status = ICE_SUCCESS;
        struct ice_prof_map *map;
        struct ice_vsig_prof *p;
        u16 i;

        ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
        map = ice_search_prof_id(hw, blk, hdl);
        if (!map) {
                status = ICE_ERR_DOES_NOT_EXIST;
                goto err_ice_add_prof_to_lst;
        }

        p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
        if (!p) {
                status = ICE_ERR_NO_MEMORY;
                goto err_ice_add_prof_to_lst;
        }

        p->profile_cookie = map->profile_cookie;
        p->prof_id = map->prof_id;
        p->tcam_count = map->ptg_cnt;

        for (i = 0; i < map->ptg_cnt; i++) {
                p->tcam[i].prof_id = map->prof_id;
                p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
                p->tcam[i].ptg = map->ptg[i];
                p->tcam[i].attr = map->attr[i];
        }

        LIST_ADD(&p->list, lst);

err_ice_add_prof_to_lst:
        ice_release_lock(&hw->blk[blk].es.prof_map_lock);
        return status;
}

/**
 * ice_move_vsi - move VSI to another VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to move
 * @vsig: the VSIG to move the VSI to
 * @chg: the change list
 */
static enum ice_status
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
             struct LIST_HEAD_TYPE *chg)
{
        enum ice_status status;
        struct ice_chs_chg *p;
        u16 orig_vsig;

        p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
        if (!p)
                return ICE_ERR_NO_MEMORY;

        status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
        if (!status)
                status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);

        if (status) {
                ice_free(hw, p);
                return status;
        }

        p->type = ICE_VSI_MOVE;
        p->vsi = vsi;
        p->orig_vsig = orig_vsig;
        p->vsig = vsig;

        LIST_ADD(&p->list_entry, chg);

        return ICE_SUCCESS;
}

/**
 * ice_set_tcam_flags - set TCAM flag don't care mask
 * @mask: mask for flags
 * @dc_mask: pointer to the don't care mask
 */
static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
{
        u16 *flag_word;

        /* flags are lowest u16 */
        flag_word = (u16 *)dc_mask;
        *flag_word = ~mask;
}
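
/* Worked example (illustrative): with mask = 0x00C0, only flag bits 6 and 7
 * participate in matching; ~mask = 0xFF3F is written into the low u16 of
 * dc_mask, marking every other flag bit as "don't care".
 */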

/**
 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
 * @hw: pointer to the HW struct
 * @idx: the index of the TCAM entry to remove
 * @chg: the list of change structures to search
 */
static void
ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
{
        struct ice_chs_chg *pos, *tmp;

        LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
                if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
                        LIST_DEL(&tmp->list_entry);
                        ice_free(hw, tmp);
                }
}

/**
 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the TCAM to disable
 * @chg: the change list
 *
 * This function appends an enable or disable TCAM change to the change list
 */
static enum ice_status
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
                      u16 vsig, struct ice_tcam_inf *tcam,
                      struct LIST_HEAD_TYPE *chg)
{
        enum ice_status status;
        struct ice_chs_chg *p;

        u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
        u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* if disabling, free the TCAM */
        if (!enable) {
                status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);

                /* if we have already created a change for this TCAM entry,
                 * remove it in order to prevent writing to a TCAM entry we
                 * will no longer have ownership of
                 */
                ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
                tcam->tcam_idx = 0;
                tcam->in_use = 0;
                return status;
        }

        /* for re-enabling, reallocate a TCAM */
        /* for entries with empty attribute masks, allocate entry from
         * the bottom of the TCAM table; otherwise, allocate from the
         * top of the table in order to give it higher priority
         */
        status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
                                    &tcam->tcam_idx);
        if (status)
                return status;

        /* add TCAM to change list */
        p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
        if (!p)
                return ICE_ERR_NO_MEMORY;

        /* set don't care masks for TCAM flags */
        ice_set_tcam_flags(tcam->attr.mask, dc_msk);

        status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
                                      tcam->ptg, vsig, 0, tcam->attr.flags,
                                      vl_msk, dc_msk, nm_msk);
        if (status)
                goto err_ice_prof_tcam_ena_dis;

        tcam->in_use = 1;

        p->type = ICE_TCAM_ADD;
        p->add_tcam_idx = true;
        p->prof_id = tcam->prof_id;
        p->ptg = tcam->ptg;
        p->vsig = 0;
        p->tcam_idx = tcam->tcam_idx;

        /* log change */
        LIST_ADD(&p->list_entry, chg);

        return ICE_SUCCESS;

err_ice_prof_tcam_ena_dis:
        ice_free(hw, p);
        return status;
}

/**
 * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
 * @ptg_attr: pointer to the PTG and attribute pair to check
 * @ptgs_used: bitmap that denotes which PTGs are in use
 * @attr_used: array of PTG and attributes pairs already used
 * @attr_cnt: count of entries in the attr_used array
 */
static bool
ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, ice_bitmap_t *ptgs_used,
                    struct ice_tcam_inf *attr_used[], u16 attr_cnt)
{
        u16 i;

        if (!ice_is_bit_set(ptgs_used, ptg_attr->ptg))
                return false;

        /* the PTG is used, so now look for correct attributes */
        for (i = 0; i < attr_cnt; i++)
                if (attr_used[i]->ptg == ptg_attr->ptg &&
                    attr_used[i]->attr.flags == ptg_attr->attr.flags &&
                    attr_used[i]->attr.mask == ptg_attr->attr.mask)
                        return true;

        return false;
}
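
/* Illustrative example: attributes are part of the pair, not just the PTG.
 * If ptgs_used has bit 5 set and attr_used holds {ptg 5, flags 0x1, mask
 * 0xF}, a query for {ptg 5, flags 0x2, mask 0xF} still returns false, so the
 * newer pair is treated as unused and gets its own enabled TCAM entry.
 */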

/**
 * ice_adj_prof_priorities - adjust profile based on priorities
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG for which to adjust profile priorities
 * @chg: the change list
 */
static enum ice_status
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
                        struct LIST_HEAD_TYPE *chg)
{
        ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
        struct ice_tcam_inf **attr_used;
        enum ice_status status = ICE_SUCCESS;
        struct ice_vsig_prof *t;
        u16 attr_used_cnt = 0;
        u16 idx;

#define ICE_MAX_PTG_ATTRS	1024
        attr_used = (struct ice_tcam_inf **)ice_calloc(hw, ICE_MAX_PTG_ATTRS,
                                                       sizeof(*attr_used));
        if (!attr_used)
                return ICE_ERR_NO_MEMORY;

        ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
        idx = vsig & ICE_VSIG_IDX_M;

        /* Priority is based on the order in which the profiles are added. The
         * newest added profile has highest priority and the oldest added
         * profile has the lowest priority. Since the profile property list for
         * a VSIG is sorted from newest to oldest, this code traverses the list
         * in order and enables the first of each PTG that it finds (that is
         * not already enabled); it also disables any duplicate PTGs that it
         * finds in the older profiles (that are currently enabled).
         */

        LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
                            ice_vsig_prof, list) {
                u16 i;

                for (i = 0; i < t->tcam_count; i++) {
                        bool used;

                        /* Scan the priorities from newest to oldest.
                         * Make sure that the newest profiles take priority.
                         */
                        used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
                                                   attr_used, attr_used_cnt);

                        if (used && t->tcam[i].in_use) {
                                /* need to mark this PTG as never match, as it
                                 * was already in use and therefore duplicate
                                 * (and lower priority)
                                 */
                                status = ice_prof_tcam_ena_dis(hw, blk, false,
                                                               vsig,
                                                               &t->tcam[i],
                                                               chg);
                                if (status)
                                        goto err_ice_adj_prof_priorities;
                        } else if (!used && !t->tcam[i].in_use) {
                                /* need to enable this PTG, as it is not in use
                                 * and not enabled (highest priority)
                                 */
                                status = ice_prof_tcam_ena_dis(hw, blk, true,
                                                               vsig,
                                                               &t->tcam[i],
                                                               chg);
                                if (status)
                                        goto err_ice_adj_prof_priorities;
                        }

                        /* keep track of used ptgs */
                        ice_set_bit(t->tcam[i].ptg, ptgs_used);
                        if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
                                attr_used[attr_used_cnt++] = &t->tcam[i];
                        else
                                ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
                }
        }

err_ice_adj_prof_priorities:
        ice_free(hw, attr_used);
        return status;
}
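
/* Worked example (illustrative): if a VSIG's property list, newest first, is
 * profile B {PTG 3, PTG 7} followed by profile A {PTG 7, PTG 9}, the walk
 * enables B's PTG 3 and PTG 7 entries (seen first, so highest priority),
 * disables A's PTG 7 entry as a lower-priority duplicate (marked never
 * match), and enables A's PTG 9 entry. Because attributes are part of the
 * pair, an older PTG 7 entry with different attr flags/mask would remain
 * enabled.
 */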

/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @rev: true to add entries to the end of the list
 * @chg: the change list
 */
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
                     bool rev, struct LIST_HEAD_TYPE *chg)
{
        /* Masks that ignore flags */
        u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
        u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
        enum ice_status status = ICE_SUCCESS;
        struct ice_prof_map *map;
        struct ice_vsig_prof *t;
        struct ice_chs_chg *p;
        u16 vsig_idx, i;

        /* Error, if this VSIG already has this profile */
        if (ice_has_prof_vsig(hw, blk, vsig, hdl))
                return ICE_ERR_ALREADY_EXISTS;

        /* new VSIG profile structure */
        t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
        if (!t)
                return ICE_ERR_NO_MEMORY;

        ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
        /* Get the details on the profile specified by the handle ID */
        map = ice_search_prof_id(hw, blk, hdl);
        if (!map) {
                status = ICE_ERR_DOES_NOT_EXIST;
                goto err_ice_add_prof_id_vsig;
        }

        t->profile_cookie = map->profile_cookie;
        t->prof_id = map->prof_id;
        t->tcam_count = map->ptg_cnt;

        /* create TCAM entries */
        for (i = 0; i < map->ptg_cnt; i++) {
                u16 tcam_idx;

                /* add TCAM to change list */
                p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
                if (!p) {
                        status = ICE_ERR_NO_MEMORY;
                        goto err_ice_add_prof_id_vsig;
                }

                /* allocate the TCAM entry index */
                /* for entries with empty attribute masks, allocate entry from
                 * the bottom of the TCAM table; otherwise, allocate from the
                 * top of the table in order to give it higher priority
                 */
                status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
                                            &tcam_idx);
                if (status) {
                        ice_free(hw, p);
                        goto err_ice_add_prof_id_vsig;
                }

                t->tcam[i].ptg = map->ptg[i];
                t->tcam[i].prof_id = map->prof_id;
                t->tcam[i].tcam_idx = tcam_idx;
                t->tcam[i].attr = map->attr[i];
                t->tcam[i].in_use = true;

                p->type = ICE_TCAM_ADD;
                p->add_tcam_idx = true;
                p->prof_id = t->tcam[i].prof_id;
                p->ptg = t->tcam[i].ptg;
                p->vsig = vsig;
                p->tcam_idx = t->tcam[i].tcam_idx;

                /* set don't care masks for TCAM flags */
                ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);

                /* write the TCAM entry */
                status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
                                              t->tcam[i].prof_id,
                                              t->tcam[i].ptg, vsig, 0,
                                              t->tcam[i].attr.flags, vl_msk,
                                              dc_msk, nm_msk);
                if (status) {
                        ice_free(hw, p);
                        goto err_ice_add_prof_id_vsig;
                }

                /* log change */
                LIST_ADD(&p->list_entry, chg);
        }

        /* add profile to VSIG */
        vsig_idx = vsig & ICE_VSIG_IDX_M;
        if (rev)
                LIST_ADD_TAIL(&t->list,
                              &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
        else
                LIST_ADD(&t->list,
                         &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);

        ice_release_lock(&hw->blk[blk].es.prof_map_lock);
        return status;

err_ice_add_prof_id_vsig:
        ice_release_lock(&hw->blk[blk].es.prof_map_lock);
        /* let caller clean up the change list */
        ice_free(hw, t);
        return status;
}

/**
 * ice_create_prof_id_vsig - add a new VSIG with a single profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @hdl: the profile handle of the profile that will be added to the VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
                        struct LIST_HEAD_TYPE *chg)
{
        enum ice_status status;
        struct ice_chs_chg *p;
        u16 new_vsig;

        p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
        if (!p)
                return ICE_ERR_NO_MEMORY;

        new_vsig = ice_vsig_alloc(hw, blk);
        if (!new_vsig) {
                status = ICE_ERR_HW_TABLE;
                goto err_ice_create_prof_id_vsig;
        }

        status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
        if (status)
                goto err_ice_create_prof_id_vsig;

        status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
        if (status)
                goto err_ice_create_prof_id_vsig;

        p->type = ICE_VSIG_ADD;
        p->vsi = vsi;
        p->orig_vsig = ICE_DEFAULT_VSIG;
        p->vsig = new_vsig;

        LIST_ADD(&p->list_entry, chg);

        return ICE_SUCCESS;

err_ice_create_prof_id_vsig:
        /* let caller clean up the change list */
        ice_free(hw, p);
        return status;
}

/**
 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @lst: the list of profiles that will be added to the VSIG
 * @new_vsig: return of new VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
                         struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
                         struct LIST_HEAD_TYPE *chg)
{
        struct ice_vsig_prof *t;
        enum ice_status status;
        u16 vsig;

        vsig = ice_vsig_alloc(hw, blk);
        if (!vsig)
                return ICE_ERR_HW_TABLE;

        status = ice_move_vsi(hw, blk, vsi, vsig, chg);
        if (status)
                return status;

        LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
                /* Reverse the order here since we are copying the list */
                status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
                                              true, chg);
                if (status)
                        return status;
        }

        *new_vsig = vsig;

        return ICE_SUCCESS;
}
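
/* Illustrative note: "lst" is ordered newest-profile-first (see
 * ice_get_profs_vsig), and passing rev == true makes ice_add_prof_id_vsig
 * append at the tail, so the copy preserves the relative priority ordering;
 * head insertion would reverse it.
 */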

/**
 * ice_find_prof_vsig - find a VSIG with a specific profile handle
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: the profile handle of the profile to search for
 * @vsig: returns the VSIG with the matching profile
 */
static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
        struct ice_vsig_prof *t;
        struct LIST_HEAD_TYPE lst;
        enum ice_status status;

        INIT_LIST_HEAD(&lst);

        t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
        if (!t)
                return false;

        t->profile_cookie = hdl;
        LIST_ADD(&t->list, &lst);

        status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);

        LIST_DEL(&t->list);
        ice_free(hw, t);

        return status == ICE_SUCCESS;
}

/**
 * ice_add_vsi_flow - add VSI flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: input VSI
 * @vsig: target VSIG to include the input VSI
 *
 * Calling this function will add the VSI to a given VSIG and
 * update the HW tables accordingly. This call can be used to
 * add multiple VSIs to a VSIG if we know beforehand that those
 * VSIs have the same characteristics as the VSIG. This saves
 * the time otherwise spent generating a new VSIG plus TCAM
 * entries until a match is found, and the subsequent rollback
 * once a matching VSIG is found.
 */
enum ice_status
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
        struct ice_chs_chg *tmp, *del;
        struct LIST_HEAD_TYPE chg;
        enum ice_status status;

        /* if target VSIG is default the move is invalid */
        if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
                return ICE_ERR_PARAM;

        INIT_LIST_HEAD(&chg);

        /* move VSI to the VSIG that matches */
        status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
        /* update hardware if success */
        if (!status)
                status = ice_upd_prof_hw(hw, blk, &chg);

        LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
                LIST_DEL(&del->list_entry);
                ice_free(hw, del);
        }

        return status;
}
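
/* Usage sketch (illustrative only; assumes "vsig" was found beforehand,
 * e.g. via ice_vsig_find_vsi() on a VSI already known to share the same
 * characteristics):
 *
 *      status = ice_add_vsi_flow(hw, ICE_BLK_RSS, vsi, vsig);
 *      if (status == ICE_ERR_PARAM)
 *              ... the target VSIG was the default VSIG, which is invalid ...
 */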

/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the specified VSI. Once
 * successfully called, the flow will be enabled.
 */
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
        struct ice_vsig_prof *tmp1, *del1;
        struct LIST_HEAD_TYPE union_lst;
        struct ice_chs_chg *tmp, *del;
        struct LIST_HEAD_TYPE chg;
        enum ice_status status;
        u16 vsig;

        INIT_LIST_HEAD(&union_lst);
        INIT_LIST_HEAD(&chg);

        /* Get profile */
        status = ice_get_prof(hw, blk, hdl, &chg);
        if (status)
                return status;

        /* determine if VSI is already part of a VSIG */
        status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
        if (!status && vsig) {
                bool only_vsi;
                u16 or_vsig;
                u16 ref;

                /* found in VSIG */
                or_vsig = vsig;

                /* make sure that there is no overlap/conflict between the new
                 * characteristics and the existing ones; we don't support that
                 * scenario
                 */
                if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
                        status = ICE_ERR_ALREADY_EXISTS;
                        goto err_ice_add_prof_id_flow;
                }

                /* last VSI in the VSIG? */
                status = ice_vsig_get_ref(hw, blk, vsig, &ref);
                if (status)
                        goto err_ice_add_prof_id_flow;
                only_vsi = (ref == 1);

                /* create a union of the current profiles and the one being
                 * added
                 */
                status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
                if (status)
                        goto err_ice_add_prof_id_flow;

                status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
                if (status)
                        goto err_ice_add_prof_id_flow;

                /* search for an existing VSIG with an exact characteristic
                 * match
                 */
                status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
                if (!status) {
                        /* move VSI to the VSIG that matches */
                        status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;

                        /* VSI has been moved out of or_vsig. If or_vsig had
                         * only that VSI, it is now empty and can be removed.
                         */
                        if (only_vsi) {
                                status = ice_rem_vsig(hw, blk, or_vsig, &chg);
                                if (status)
                                        goto err_ice_add_prof_id_flow;
                        }
                } else if (only_vsi) {
                        /* If the original VSIG only contains one VSI, then it
                         * will be the requesting VSI. In this case the VSI is
                         * not sharing entries and we can simply add the new
                         * profile to the VSIG.
                         */
                        status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
                                                      &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;

                        /* Adjust priorities */
                        status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
                } else {
                        /* No match, so we need a new VSIG */
                        status = ice_create_vsig_from_lst(hw, blk, vsi,
                                                          &union_lst, &vsig,
                                                          &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;

                        /* Adjust priorities */
                        status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
                }
        } else {
                /* need to find or add a VSIG */
                /* search for an existing VSIG with an exact characteristic
                 * match
                 */
                if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
                        /* found an exact match; add or move the VSI to the
                         * matching VSIG
                         */
                        status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
                } else {
                        /* we did not find an exact match, so we need to add
                         * a new VSIG
                         */
                        status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
                                                         &chg);
                        if (status)
                                goto err_ice_add_prof_id_flow;
                }
        }

        /* update hardware */
        if (!status)
                status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_add_prof_id_flow:
        LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
                LIST_DEL(&del->list_entry);
                ice_free(hw, del);
        }

        LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
                LIST_DEL(&del1->list);
                ice_free(hw, del1);
        }

        return status;
}
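
/* Usage sketch (illustrative only; "hdl" is the tracking handle used when
 * the profile was created via ice_add_prof, and "vsi" a hardware VSI
 * number):
 *
 *      status = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
 *      if (status == ICE_ERR_ALREADY_EXISTS)
 *              ... this VSI's VSIG already carries the profile ...
 */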

/**
 * ice_rem_prof_from_list - remove a profile from list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
{
        struct ice_vsig_prof *ent, *tmp;

        LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
                if (ent->profile_cookie == hdl) {
                        LIST_DEL(&ent->list);
                        ice_free(hw, ent);
                        return ICE_SUCCESS;
                }

        return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the specified VSI. Once
 * successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
        struct ice_vsig_prof *tmp1, *del1;
        struct LIST_HEAD_TYPE chg, copy;
        struct ice_chs_chg *tmp, *del;
        enum ice_status status;
        u16 vsig;

        INIT_LIST_HEAD(&copy);
        INIT_LIST_HEAD(&chg);

        /* determine if VSI is already part of a VSIG */
        status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
        if (!status && vsig) {
                bool last_profile;
                bool only_vsi;
                u16 ref;

                /* found in VSIG */
                last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
                status = ice_vsig_get_ref(hw, blk, vsig, &ref);
                if (status)
                        goto err_ice_rem_prof_id_flow;
                only_vsi = (ref == 1);

                if (only_vsi) {
                        /* If the original VSIG only contains one reference,
                         * which will be the requesting VSI, then the VSI is not
                         * sharing entries and we can simply remove the specific
                         * characteristics from the VSIG.
                         */

                        if (last_profile) {
                                /* If there are no profiles left for this VSIG,
                                 * then simply remove the VSIG.
                                 */
                                status = ice_rem_vsig(hw, blk, vsig, &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;
                        } else {
                                status = ice_rem_prof_id_vsig(hw, blk, vsig,
                                                              hdl, &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;

                                /* Adjust priorities */
                                status = ice_adj_prof_priorities(hw, blk, vsig,
                                                                 &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;
                        }

                } else {
                        /* Make a copy of the VSIG's list of Profiles */
                        status = ice_get_profs_vsig(hw, blk, vsig, &copy);
                        if (status)
                                goto err_ice_rem_prof_id_flow;

                        /* Remove specified profile entry from the list */
                        status = ice_rem_prof_from_list(hw, &copy, hdl);
                        if (status)
                                goto err_ice_rem_prof_id_flow;

                        if (LIST_EMPTY(&copy)) {
                                status = ice_move_vsi(hw, blk, vsi,
                                                      ICE_DEFAULT_VSIG, &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;

                        } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
                                                            &vsig)) {
                                /* found a VSIG with a matching profile list;
                                 * add or move the VSI to that VSIG
                                 */
                                status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;
                        } else {
                                /* since no existing VSIG supports this
                                 * characteristic pattern, we need to create a
                                 * new VSIG and TCAM entries
                                 */
                                status = ice_create_vsig_from_lst(hw, blk, vsi,
                                                                  &copy, &vsig,
                                                                  &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;

                                /* Adjust priorities */
                                status = ice_adj_prof_priorities(hw, blk, vsig,
                                                                 &chg);
                                if (status)
                                        goto err_ice_rem_prof_id_flow;
                        }
                }
        } else {
                status = ICE_ERR_DOES_NOT_EXIST;
        }

        /* update hardware tables */
        if (!status)
                status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
        LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
                LIST_DEL(&del->list_entry);
                ice_free(hw, del);
        }

        LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
                LIST_DEL(&del1->list);
                ice_free(hw, del1);
        }

        return status;
}
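
/* Usage sketch (illustrative only; mirrors the add path above):
 *
 *      status = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, hdl);
 *      if (status == ICE_ERR_DOES_NOT_EXIST)
 *              ... the VSI was not in any VSIG; nothing to disable ...
 */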

/**
 * ice_flow_assoc_hw_prof - add profile id flow for main/ctrl VSI flow entry
 * @hw: pointer to the HW struct
 * @blk: HW block
 * @dest_vsi_handle: dest VSI handle
 * @fdir_vsi_handle: fdir programming VSI handle
 * @id: profile id (handle)
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the destination VSI and, for
 * the flow director (FD) block, for the FDIR programming VSI as well. Once
 * successfully called, the flow will be enabled.
 */
enum ice_status
ice_flow_assoc_hw_prof(struct ice_hw *hw, enum ice_block blk,
                       u16 dest_vsi_handle, u16 fdir_vsi_handle, int id)
{
        enum ice_status status = ICE_SUCCESS;
        u16 vsi_num;

        vsi_num = ice_get_hw_vsi_num(hw, dest_vsi_handle);
        status = ice_add_prof_id_flow(hw, blk, vsi_num, id);
        if (status) {
                ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed for main VSI flow entry, %d\n",
                          status);
                goto err_add_prof;
        }

        if (blk != ICE_BLK_FD)
                return status;

        vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi_handle);
        status = ice_add_prof_id_flow(hw, blk, vsi_num, id);
        if (status) {
                ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed for ctrl VSI flow entry, %d\n",
                          status);
                goto err_add_entry;
        }

        return status;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, dest_vsi_handle);
        ice_rem_prof_id_flow(hw, blk, vsi_num, id);
err_add_prof:
        ice_flow_rem_prof(hw, blk, id);

        return status;
}
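
/* Usage sketch (illustrative only; both handles are software VSI handles
 * and "id" is the profile id used when the FD profile was built):
 *
 *      status = ice_flow_assoc_hw_prof(hw, ICE_BLK_FD, dest_vsi_handle,
 *                                      fdir_vsi_handle, id);
 *
 * On failure the function rolls back for the caller: the main-VSI
 * association is undone and the flow profile itself is removed via
 * ice_flow_rem_prof.
 */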