1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Inc. ([email protected]). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
21 * permission.
22
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40 /**
41 * @file
42 *
43 * Support library for the hardware Packet Output unit.
44 *
45 * <hr>$Revision: 70030 $<hr>
46 */
47 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
48 #include <asm/octeon/cvmx.h>
49 #include <asm/octeon/cvmx-config.h>
50 #include <asm/octeon/cvmx-pko.h>
51 #include <asm/octeon/cvmx-helper.h>
52 #include <asm/octeon/cvmx-helper-cfg.h>
53 #include <asm/octeon/cvmx-clock.h>
54 #else
55 #if !defined(__FreeBSD__) || !defined(_KERNEL)
56 #include "executive-config.h"
57 #endif
58 #include "cvmx.h"
59 #include "cvmx-sysinfo.h"
60 #if !defined(__FreeBSD__) || !defined(_KERNEL)
61 #include "cvmx-config.h"
62 #endif
63 #include "cvmx-pko.h"
64 #include "cvmx-helper.h"
65 #include "cvmx-helper-cfg.h"
66 #endif
67
68 /* #define PKO_DEBUG */
69
70 #define CVMX_PKO_NQ_PER_PORT_MAX 32
71
72 /**
73 * Internal state of packet output
74 */
75 #ifdef CVMX_ENABLE_PKO_FUNCTIONS
76
77 /*
78 * PKO port iterator
79 */
/*
 * Iterate over every configured PKO internal port.  The loop body sees
 * the current port number as XIT_pko_port.  Ports whose queue base is
 * CVMX_HELPER_CFG_INVALID_VALUE (no queues allocated) are skipped.
 * Must be closed with CVMX_PKO_FOR_EACH_PORT_END.
 */
#define CVMX_PKO_FOR_EACH_PORT_BEGIN do { \
    int XIT_pko_port; \
    for (XIT_pko_port = 0; XIT_pko_port < CVMX_HELPER_CFG_MAX_PKO_PORT; \
        XIT_pko_port++) \
    { \
 if (__cvmx_helper_cfg_pko_queue_base(XIT_pko_port) != \
     CVMX_HELPER_CFG_INVALID_VALUE)

/* Closes the loop opened by CVMX_PKO_FOR_EACH_PORT_BEGIN. */
#define CVMX_PKO_FOR_EACH_PORT_END } /* for */ \
   } while (0)
90
91 /*
92 * @INTERNAL
93 *
94 * Get INT for a port
95 *
96 * @param interface
97 * @param index
98 * @return the INT value on success and -1 on error
99 */
__cvmx_pko_int(int interface,int index)100 static int __cvmx_pko_int(int interface, int index)
101 {
102 cvmx_helper_cfg_assert(interface < CVMX_HELPER_CFG_MAX_IFACE);
103 cvmx_helper_cfg_assert(index >= 0);
104
105 switch (interface)
106 {
107 case 0:
108 cvmx_helper_cfg_assert(index < 4);
109 return index;
110 break;
111 case 1:
112 cvmx_helper_cfg_assert(index == 0);
113 return 4;
114 break;
115 case 2:
116 cvmx_helper_cfg_assert(index < 4);
117 return index + 8;
118 break;
119 case 3:
120 cvmx_helper_cfg_assert(index < 4);
121 return index + 0xC;
122 break;
123 case 4:
124 cvmx_helper_cfg_assert(index < 4);
125 return index + 0x10;
126 break;
127 case 5:
128 cvmx_helper_cfg_assert(index < 256);
129 return 0x1C;
130 break;
131 case 6:
132 cvmx_helper_cfg_assert(index < 256);
133 return 0x1D;
134 break;
135 case 7:
136 cvmx_helper_cfg_assert(index < 32);
137 return 0x1E;
138 break;
139 case 8:
140 cvmx_helper_cfg_assert(index < 8);
141 return 0x1F;
142 break;
143 }
144
145 return -1;
146 }
147
cvmx_pko_get_base_pko_port(int interface,int index)148 int cvmx_pko_get_base_pko_port(int interface, int index)
149 {
150 if (octeon_has_feature(OCTEON_FEATURE_PKND))
151 return __cvmx_helper_cfg_pko_port_base(interface, index);
152 else
153 return cvmx_helper_get_ipd_port(interface, index);
154 }
155 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
156 EXPORT_SYMBOL(cvmx_pko_get_base_pko_port);
157 #endif
158
cvmx_pko_get_num_pko_ports(int interface,int index)159 int cvmx_pko_get_num_pko_ports(int interface, int index)
160 {
161 if (octeon_has_feature(OCTEON_FEATURE_PKND))
162 return __cvmx_helper_cfg_pko_port_num(interface, index);
163 else
164 return 1;
165 }
166 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
167 EXPORT_SYMBOL(cvmx_pko_get_num_pko_ports);
168 #endif
169
cvmx_pko_get_base_queue(int port)170 int cvmx_pko_get_base_queue(int port)
171 {
172 if (octeon_has_feature(OCTEON_FEATURE_PKND))
173 {
174 return __cvmx_helper_cfg_pko_queue_base(
175 cvmx_helper_cfg_ipd2pko_port_base(port));
176 }
177 else
178 return cvmx_pko_get_base_queue_per_core(port, 0);
179 }
180 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
181 EXPORT_SYMBOL(cvmx_pko_get_base_queue);
182 #endif
183
/**
 * For a given PKO port number, return the base output queue
 * for the port.
 *
 * Thin wrapper over the helper configuration table; unlike
 * cvmx_pko_get_base_queue() it takes a PKO port, not an IPD port.
 *
 * @param pko_port PKO port number
 * @return Base output queue
 */
int cvmx_pko_get_base_queue_pkoid(int pko_port)
{
    return __cvmx_helper_cfg_pko_queue_base(pko_port);
}
195
/**
 * For a given PKO port number, return the number of output queues
 * for the port.
 *
 * Thin wrapper over the helper configuration table; unlike
 * cvmx_pko_get_num_queues() it takes a PKO port, not an IPD port.
 *
 * @param pko_port PKO port number
 * @return the number of output queues
 */
int cvmx_pko_get_num_queues_pkoid(int pko_port)
{
    return __cvmx_helper_cfg_pko_queue_num(pko_port);
}
207
cvmx_pko_get_num_queues(int port)208 int cvmx_pko_get_num_queues(int port)
209 {
210 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
211 {
212 return __cvmx_helper_cfg_pko_queue_num(
213 cvmx_helper_cfg_ipd2pko_port_base(port));
214 }
215 else
216 {
217 if (port < 16)
218 return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
219 else if (port < 32)
220 return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
221 else if (port < 36)
222 return CVMX_PKO_QUEUES_PER_PORT_PCI;
223 else if (port < 40)
224 return CVMX_PKO_QUEUES_PER_PORT_LOOP;
225 else if (port < 42)
226 return CVMX_PKO_QUEUES_PER_PORT_SRIO0;
227 else if (port < 44)
228 return CVMX_PKO_QUEUES_PER_PORT_SRIO1;
229 else if (port < 46)
230 return CVMX_PKO_QUEUES_PER_PORT_SRIO2;
231 }
232 return 0;
233 }
234 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
235 EXPORT_SYMBOL(cvmx_pko_get_num_queues);
236 #endif
237
238 #ifdef PKO_DEBUG
239 /**
240 * Show queues for the internal ports
241 */
cvmx_pko_show_queue_map(void)242 void cvmx_pko_show_queue_map(void)
243 {
244 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
245 {
246 CVMX_PKO_FOR_EACH_PORT_BEGIN {
247 cvmx_dprintf("pko_port %d (interface%d index%d) has %d queues (queue base = %d)\n",
248 XIT_pko_port,
249 __cvmx_helper_cfg_pko_port_interface(XIT_pko_port),
250 __cvmx_helper_cfg_pko_port_index(XIT_pko_port),
251 __cvmx_helper_cfg_pko_queue_num(XIT_pko_port),
252 __cvmx_helper_cfg_pko_queue_base(XIT_pko_port));
253 } CVMX_PKO_FOR_EACH_PORT_END;
254 }
255 else
256 {
257 int core, port;
258 int pko_output_ports;
259
260 pko_output_ports = 36;
261 cvmx_dprintf("port");
262 for(port = 0; port < pko_output_ports; port++)
263 cvmx_dprintf("%3d ", port);
264 cvmx_dprintf("\n");
265
266 for(core = 0; core < CVMX_MAX_CORES; core++)
267 {
268 cvmx_dprintf("\n%2d: ", core);
269 for(port = 0; port < pko_output_ports; port++)
270 cvmx_dprintf("%3d ",
271 cvmx_pko_get_base_queue_per_core(port, core));
272 }
273 cvmx_dprintf("\n");
274
275 }
276 }
277 #endif /* PKO_DEBUG */
278
279 /*
280 * Configure queues for an internal port.
281 * @INTERNAL
282 * @param pko_port PKO internal port number
283 * Note: o68 only
284 */
__cvmx_pko_iport_config(int pko_port)285 static void __cvmx_pko_iport_config(int pko_port)
286 {
287 int queue, base_queue, num_queues;
288 int static_priority_base;
289 int static_priority_end;
290 cvmx_pko_mem_iqueue_ptrs_t config;
291 uint64_t *buf_ptr = NULL;
292 uint64_t priorities[CVMX_PKO_NQ_PER_PORT_MAX] = {
293 [0 ... CVMX_PKO_NQ_PER_PORT_MAX - 1] = 8 };
294
295 static_priority_base = -1;
296 static_priority_end = -1;
297 base_queue = __cvmx_helper_cfg_pko_queue_base(pko_port);
298 num_queues = __cvmx_helper_cfg_pko_queue_num(pko_port);
299
300 /*
301 * Give the user a chance to override the per queue priorities.
302 */
303 if (cvmx_override_pko_queue_priority)
304 cvmx_override_pko_queue_priority(pko_port, &priorities[0]);
305
306 /*
307 * static queue priority validation
308 */
309 for (queue = 0; queue < num_queues; queue++)
310 {
311 if (static_priority_base == -1 &&
312 priorities[queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
313 static_priority_base = queue;
314
315 if (static_priority_base != -1 &&
316 static_priority_end == -1 &&
317 priorities[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY &&
318 queue)
319 static_priority_end = queue - 1;
320 else if (static_priority_base != -1 &&
321 static_priority_end == -1 &&
322 queue == num_queues - 1)
323 static_priority_end = queue; /* all queues are static priority */
324
325 /*
326 * Check to make sure all static priority queues are contiguous.
327 * Also catches some cases of static priorites not starting from
328 * queue 0.
329 */
330 if (static_priority_end != -1 &&
331 (int)queue > static_priority_end &&
332 priorities[queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
333 {
334 cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Static priority "
335 "queues aren't contiguous or don't start at base queue. "
336 "q: %d, eq: %d\n", (int)queue, static_priority_end);
337 }
338 if (static_priority_base > 0)
339 {
340 cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Static priority "
341 "queues don't start at base queue. sq: %d\n",
342 static_priority_base);
343 }
344 }
345
346 /*
347 * main loop to set the fields of CVMX_PKO_MEM_IQUEUE_PTRS for
348 * each queue
349 */
350 for (queue = 0; queue < num_queues; queue++)
351 {
352 config.u64 = 0;
353 config.s.index = queue;
354 config.s.qid = base_queue + queue;
355 config.s.ipid = pko_port;
356 config.s.tail = (queue == (num_queues - 1));
357 config.s.s_tail = (queue == static_priority_end);
358 config.s.static_p = (static_priority_base >= 0);
359 config.s.static_q = (queue <= static_priority_end);
360
361 /*
362 * Convert the priority into an enable bit field.
363 * Try to space the bits out evenly so the packet
364 * don't get grouped up.
365 */
366 switch ((int)priorities[queue])
367 {
368 case 0: config.s.qos_mask = 0x00; break;
369 case 1: config.s.qos_mask = 0x01; break;
370 case 2: config.s.qos_mask = 0x11; break;
371 case 3: config.s.qos_mask = 0x49; break;
372 case 4: config.s.qos_mask = 0x55; break;
373 case 5: config.s.qos_mask = 0x57; break;
374 case 6: config.s.qos_mask = 0x77; break;
375 case 7: config.s.qos_mask = 0x7f; break;
376 case 8: config.s.qos_mask = 0xff; break;
377 case CVMX_PKO_QUEUE_STATIC_PRIORITY:
378 config.s.qos_mask = 0xff;
379 break;
380 default:
381 cvmx_dprintf("ERROR: __cvmx_pko_iport_config: "
382 "Invalid priority %llu\n",
383 (unsigned long long)priorities[queue]);
384 config.s.qos_mask = 0xff;
385 break;
386 }
387
388 /*
389 * The command queues
390 */
391 {
392 cvmx_cmd_queue_result_t cmd_res;
393
394 cmd_res = cvmx_cmd_queue_initialize(
395 CVMX_CMD_QUEUE_PKO(base_queue + queue),
396 CVMX_PKO_MAX_QUEUE_DEPTH,
397 CVMX_FPA_OUTPUT_BUFFER_POOL,
398 (CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
399 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
400
401 if (cmd_res != CVMX_CMD_QUEUE_SUCCESS)
402 {
403 switch (cmd_res)
404 {
405 case CVMX_CMD_QUEUE_NO_MEMORY:
406 cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Unable to allocate output buffer.");
407 break;
408 case CVMX_CMD_QUEUE_ALREADY_SETUP:
409 cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Port already setup");
410 break;
411 case CVMX_CMD_QUEUE_INVALID_PARAM:
412 default:
413 cvmx_dprintf("ERROR: __cvmx_pko_iport_config: Command queue initialization failed.");
414 break;
415 }
416 cvmx_dprintf(" pko_port%d base_queue%d num_queues%d queue%d.\n",
417 pko_port, base_queue, num_queues, queue);
418 }
419
420 buf_ptr = (uint64_t*)cvmx_cmd_queue_buffer(
421 CVMX_CMD_QUEUE_PKO(base_queue + queue));
422 config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
423 }
424
425 CVMX_SYNCWS;
426 cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
427 }
428 }
429
/*
 * Allocate queues for the PKO internal ports.
 * @INTERNAL
 *
 * Walks every configured internal port and programs its queues via
 * __cvmx_pko_iport_config().  CN68XX only.
 */
static void __cvmx_pko_queue_alloc_o68(void)
{
    CVMX_PKO_FOR_EACH_PORT_BEGIN {
        __cvmx_pko_iport_config(XIT_pko_port);
    } CVMX_PKO_FOR_EACH_PORT_END;
}
441
/*
 * Allocate memory for PKO engines.
 *
 * CN68XX has 40KB to divide between the engines in 2KB chunks.  Each
 * engine gets an equal share; the last engine also receives whatever
 * is left over from rounding (so the ILK engine gets the most space).
 *
 * @param engine is the PKO engine ID.
 * @return # of 2KB-chunks allocated to this PKO engine.
 */
static int __cvmx_pko_memory_per_engine_o68(int engine)
{
    const int total_chunks = 40 / 2;    /* 40KB in 2KB chunks */
    int max_engine = __cvmx_helper_cfg_pko_max_engine();
    int chunks_per_engine = total_chunks / max_engine;

    /* Unused engines get no space. */
    if (engine >= max_engine)
        return 0;

    /* The last engine also gets the space lost to rounding. */
    if (engine == max_engine - 1)
        return total_chunks - engine * chunks_per_engine;

    /* All other engines get the same share. */
    return chunks_per_engine;
}
477
478 /*
479 * Setup one-to-one mapping between PKO iport and eport.
480 * @INTERNAL
481 */
__cvmx_pko_port_map_o68(void)482 static void __cvmx_pko_port_map_o68(void)
483 {
484 int i;
485 int interface, index;
486 cvmx_helper_interface_mode_t mode;
487 cvmx_pko_mem_iport_ptrs_t config;
488
489 /*
490 * Initialize every iport with the invalid eid.
491 */
492 #define CVMX_O68_PKO_INVALID_EID 31
493 config.u64 = 0;
494 config.s.eid = CVMX_O68_PKO_INVALID_EID;
495 for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
496 {
497 config.s.ipid = i;
498 cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
499 }
500
501 /*
502 * Set up PKO_MEM_IPORT_PTRS
503 */
504 CVMX_PKO_FOR_EACH_PORT_BEGIN {
505 interface = __cvmx_helper_cfg_pko_port_interface(XIT_pko_port);
506 index = __cvmx_helper_cfg_pko_port_index(XIT_pko_port);
507 mode = cvmx_helper_interface_get_mode(interface);
508
509 if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
510 continue;
511
512 config.s.ipid = XIT_pko_port;
513 config.s.qos_mask = 0xff;
514 config.s.crc = __cvmx_helper_get_has_fcs(interface);
515 config.s.min_pkt = __cvmx_helper_get_pko_padding(interface);
516 config.s.intr = __cvmx_pko_int(interface, index);
517 config.s.eid = __cvmx_helper_cfg_pko_port_eid(XIT_pko_port);
518 config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ? index :
519 XIT_pko_port;
520 cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
521 } CVMX_PKO_FOR_EACH_PORT_END;
522 }
523
__cvmx_pko_get_pipe(int interface,int index)524 int __cvmx_pko_get_pipe(int interface, int index)
525 {
526 /*
527 * the loopback ports do not have pipes
528 */
529 if (cvmx_helper_interface_get_mode(interface) ==
530 CVMX_HELPER_INTERFACE_MODE_LOOP)
531 return -1;
532 /*
533 * We use pko_port as the pipe. See __cvmx_pko_port_map_o68().
534 */
535 return cvmx_helper_get_pko_port(interface, index);
536 }
537
538 /*
539 * chip-specific setup
540 * @INTERNAL
541 */
__cvmx_pko_chip_init(void)542 static void __cvmx_pko_chip_init(void)
543 {
544 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
545 {
546 __cvmx_pko_port_map_o68();
547 __cvmx_pko_queue_alloc_o68();
548 }
549 else
550 {
551 int i;
552 uint64_t priority = 8;
553
554 /*
555 * Initialize queues
556 */
557 for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
558 cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
559 &priority);
560 }
561 }
562
/**
 * Call before any other calls to initialize the packet
 * output system. This does chip global config, and should only be
 * done by one core.
 */

void cvmx_pko_initialize_global(void)
{
    cvmx_pko_reg_cmd_buf_t config;
    int i;

    /*
     * Set the size of the PKO command buffers to an odd number of 64bit
     * words. This allows the normal two word send to stay aligned and never
     * span a command word buffer.
     */
    config.u64 = 0;
    config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
    config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
    cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);

    /*
     * chip-specific setup.
     */
    __cvmx_pko_chip_init();

    /*
     * If we aren't using all of the queues optimize PKO's internal memory.
     */
    if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) ||
        OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX) ||
        OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
    {
        int num_interfaces;
        int last_port;
        int max_queues;

        /* Find the highest queue number actually in use. */
        if (octeon_has_feature(OCTEON_FEATURE_PKND))
            max_queues = __cvmx_helper_cfg_pko_max_queue();
        else
        {
            num_interfaces = cvmx_helper_get_number_of_interfaces();
            last_port = cvmx_helper_get_last_ipd_port(num_interfaces-1);
            max_queues = cvmx_pko_get_base_queue(last_port) +
                         cvmx_pko_get_num_queues(last_port);
        }

        /*
         * Pick the queue mode with the fewest queues that still covers
         * max_queues; fewer queues leave more internal memory per queue.
         * Note the mode values are encoded differently on CN38XX.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN38XX))
        {
            if (max_queues <= 32)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
            else if (max_queues <= 64)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
            else
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 0);
        }
        else
        {
            if (OCTEON_IS_MODEL(OCTEON_CN68XX) && max_queues <= 32)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 3);
            else if (max_queues <= 64)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
            else if (max_queues <= 128)
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
            else
                cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 0);
            if (OCTEON_IS_MODEL(OCTEON_CN68XX))
            {
                /*
                 * Distribute the internal memory over the PKO engines:
                 * two storage registers, 16 engine fields each.
                 */
                for (i = 0; i < 2; i++)
                {
                    cvmx_pko_reg_engine_storagex_t engine_storage;

/* Assign the engine##index field of the storage register for
   engine (16 * i + index). */
#define PKO_ASSIGN_ENGINE_STORAGE(index)        \
    engine_storage.s.engine##index =            \
        __cvmx_pko_memory_per_engine_o68(16 * i + (index))

                    engine_storage.u64 = 0;
                    PKO_ASSIGN_ENGINE_STORAGE(0);
                    PKO_ASSIGN_ENGINE_STORAGE(1);
                    PKO_ASSIGN_ENGINE_STORAGE(2);
                    PKO_ASSIGN_ENGINE_STORAGE(3);
                    PKO_ASSIGN_ENGINE_STORAGE(4);
                    PKO_ASSIGN_ENGINE_STORAGE(5);
                    PKO_ASSIGN_ENGINE_STORAGE(6);
                    PKO_ASSIGN_ENGINE_STORAGE(7);
                    PKO_ASSIGN_ENGINE_STORAGE(8);
                    PKO_ASSIGN_ENGINE_STORAGE(9);
                    PKO_ASSIGN_ENGINE_STORAGE(10);
                    PKO_ASSIGN_ENGINE_STORAGE(11);
                    PKO_ASSIGN_ENGINE_STORAGE(12);
                    PKO_ASSIGN_ENGINE_STORAGE(13);
                    PKO_ASSIGN_ENGINE_STORAGE(14);
                    PKO_ASSIGN_ENGINE_STORAGE(15);
                    cvmx_write_csr(CVMX_PKO_REG_ENGINE_STORAGEX(i),
                                   engine_storage.u64);
                }
            }
        }
    }
}
663
/**
 * This function does per-core initialization required by the PKO routines.
 * This must be called on all cores that will do packet output, and must
 * be called after the FPA has been initialized and filled with pages.
 *
 * NOTE(review): currently a no-op; kept, presumably, for API symmetry
 * with cvmx_pko_initialize_global() and future per-core state.
 *
 * @return 0 on success
 *         !0 on failure
 */
int cvmx_pko_initialize_local(void)
{
    /* Nothing to do */
    return 0;
}
677 #endif
678
679 /**
680 * Enables the packet output hardware. It must already be
681 * configured.
682 */
cvmx_pko_enable(void)683 void cvmx_pko_enable(void)
684 {
685 cvmx_pko_reg_flags_t flags;
686
687 flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
688 if (flags.s.ena_pko)
689 cvmx_dprintf("Warning: Enabling PKO when PKO already enabled.\n");
690
691 flags.s.ena_dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
692 flags.s.ena_pko = 1;
693 flags.s.store_be =1; /*
694 * always enable big endian for 3-word command.
695 * Does nothing for 2-word.
696 */
697 cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
698 }
699
700 /**
701 * Disables the packet output. Does not affect any configuration.
702 */
cvmx_pko_disable(void)703 void cvmx_pko_disable(void)
704 {
705 cvmx_pko_reg_flags_t pko_reg_flags;
706 pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
707 pko_reg_flags.s.ena_pko = 0;
708 cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
709 }
710
711 #ifdef CVMX_ENABLE_PKO_FUNCTIONS
712 /**
713 * @INTERNAL
714 * Reset the packet output.
715 */
__cvmx_pko_reset(void)716 static void __cvmx_pko_reset(void)
717 {
718 cvmx_pko_reg_flags_t pko_reg_flags;
719 pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
720 pko_reg_flags.s.reset = 1;
721 cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
722 }
723
724 /**
725 * Shutdown and free resources required by packet output.
726 */
cvmx_pko_shutdown(void)727 void cvmx_pko_shutdown(void)
728 {
729 int queue;
730
731 cvmx_pko_disable();
732
733 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
734 {
735 cvmx_pko_mem_iqueue_ptrs_t config;
736 config.u64 = 0;
737 for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++)
738 {
739 config.s.qid = queue;
740 cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
741 cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
742 }
743 }
744 else
745 {
746 cvmx_pko_mem_queue_ptrs_t config;
747 for (queue=0; queue<CVMX_PKO_MAX_OUTPUT_QUEUES; queue++)
748 {
749 config.u64 = 0;
750 config.s.tail = 1;
751 config.s.index = 0;
752 config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
753 config.s.queue = queue & 0x7f;
754 config.s.qos_mask = 0;
755 config.s.buf_ptr = 0;
756 if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
757 {
758 cvmx_pko_reg_queue_ptrs1_t config1;
759 config1.u64 = 0;
760 config1.s.qid7 = queue >> 7;
761 cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
762 }
763 cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
764 cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
765 }
766 }
767
768 __cvmx_pko_reset();
769 }
770
/**
 * Configure a output port and the associated queues for use.
 *
 * @param port       Port to configure.
 * @param base_queue First queue number to associate with this port.
 * @param num_queues Number of queues to associate with this port
 * @param priority   Array of priority levels for each queue. Values are
 *                   allowed to be 0-8. A value of 8 get 8 times the traffic
 *                   of a value of 1. A value of 0 indicates that no rounds
 *                   will be participated in. These priorities can be changed
 *                   on the fly while the pko is enabled. A priority of 9
 *                   indicates that static priority should be used. If static
 *                   priority is used all queues with static priority must be
 *                   contiguous starting at the base_queue, and lower numbered
 *                   queues have higher priority than higher numbered queues.
 *                   There must be num_queues elements in the array.
 *
 * @return CVMX_PKO_SUCCESS, or a CVMX_PKO_* error code on invalid
 *         port/queue/priority arguments or command-queue init failure.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
                                       uint64_t num_queues, const uint64_t priority[])
{
    cvmx_pko_status_t result_code;
    uint64_t queue;
    cvmx_pko_mem_queue_ptrs_t config;
    cvmx_pko_reg_queue_ptrs1_t config1;
    int static_priority_base = -1;
    int static_priority_end = -1;

    /* CN68XX ports are configured via __cvmx_pko_iport_config() instead. */
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        return CVMX_PKO_SUCCESS;

    if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS) &&
        (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID))
    {
        cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
            (unsigned long long)port);
        return CVMX_PKO_INVALID_PORT;
    }

    if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES)
    {
        cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
            (unsigned long long)(base_queue + num_queues));
        return CVMX_PKO_INVALID_QUEUE;
    }

    if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)
    {
        /*
         * Validate the static queue priority setup and set
         * static_priority_base and static_priority_end accordingly.
         */
        for (queue = 0; queue < num_queues; queue++)
        {
            /* Find first queue of static priority */
            if (static_priority_base == -1 && priority[queue] ==
                CVMX_PKO_QUEUE_STATIC_PRIORITY)
                static_priority_base = queue;
            /* Find last queue of static priority */
            if (static_priority_base != -1 && static_priority_end == -1 &&
                priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY && queue)
                static_priority_end = queue - 1;
            else if (static_priority_base != -1 && static_priority_end == -1 &&
                queue == num_queues - 1)
                static_priority_end = queue; /* all queues're static priority */

            /*
             * Check to make sure all static priority queues are contiguous.
             * Also catches some cases of static priorities not starting at
             * queue 0.
             */
            if (static_priority_end != -1 && (int)queue > static_priority_end &&
                priority[queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
            {
                cvmx_dprintf("ERROR: cvmx_pko_config_port: Static priority "
                    "queues aren't contiguous or don't start at base queue. "
                    "q: %d, eq: %d\n", (int)queue, static_priority_end);
                return CVMX_PKO_INVALID_PRIORITY;
            }
        }
        if (static_priority_base > 0)
        {
            cvmx_dprintf("ERROR: cvmx_pko_config_port: Static priority queues "
                "don't start at base queue. sq: %d\n", static_priority_base);
            return CVMX_PKO_INVALID_PRIORITY;
        }
    }

    /*
     * At this point, static_priority_base and static_priority_end are either
     * both -1, or are valid start/end queue numbers
     */

    result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
    cvmx_dprintf("num queues: %d (%lld,%lld)\n", (int)num_queues,
        (unsigned long long)CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
        (unsigned long long)CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

    for (queue = 0; queue < num_queues; queue++)
    {
        uint64_t *buf_ptr = NULL;

        /* PTRS1 carries the high bits of index and queue id. */
        config1.u64 = 0;
        config1.s.idx3 = queue >> 3;
        config1.s.qid7 = (base_queue + queue) >> 7;

        config.u64 = 0;
        config.s.tail = queue == (num_queues - 1);
        config.s.index = queue;
        config.s.port = port;
        config.s.queue = base_queue + queue;

        config.s.static_p = static_priority_base >= 0;
        config.s.static_q = (int)queue <= static_priority_end;
        config.s.s_tail = (int)queue == static_priority_end;
        /*
         * Convert the priority into an enable bit field. Try to space the bits
         * out evenly so the packet don't get grouped up
         */
        switch ((int)priority[queue])
        {
        case 0: config.s.qos_mask = 0x00; break;
        case 1: config.s.qos_mask = 0x01; break;
        case 2: config.s.qos_mask = 0x11; break;
        case 3: config.s.qos_mask = 0x49; break;
        case 4: config.s.qos_mask = 0x55; break;
        case 5: config.s.qos_mask = 0x57; break;
        case 6: config.s.qos_mask = 0x77; break;
        case 7: config.s.qos_mask = 0x7f; break;
        case 8: config.s.qos_mask = 0xff; break;
        case CVMX_PKO_QUEUE_STATIC_PRIORITY:
            config.s.qos_mask = 0xff;
            break;
        default:
            cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid priority %llu\n",
                (unsigned long long)priority[queue]);
            config.s.qos_mask = 0xff;
            result_code = CVMX_PKO_INVALID_PRIORITY;
            break;
        }

        if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)
        {
            /* Allocate the command queue backing this output queue. */
            cvmx_cmd_queue_result_t cmd_res = cvmx_cmd_queue_initialize(
                CVMX_CMD_QUEUE_PKO(base_queue + queue),
                CVMX_PKO_MAX_QUEUE_DEPTH,
                CVMX_FPA_OUTPUT_BUFFER_POOL,
                CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
                CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST*8);
            if (cmd_res != CVMX_CMD_QUEUE_SUCCESS)
            {
                switch (cmd_res)
                {
                case CVMX_CMD_QUEUE_NO_MEMORY:
                    cvmx_dprintf("ERROR: cvmx_pko_config_port: "
                        "Unable to allocate output buffer.\n");
                    return(CVMX_PKO_NO_MEMORY);
                case CVMX_CMD_QUEUE_ALREADY_SETUP:
                    cvmx_dprintf("ERROR: cvmx_pko_config_port: "
                        "Port already setup.\n");
                    return(CVMX_PKO_PORT_ALREADY_SETUP);
                case CVMX_CMD_QUEUE_INVALID_PARAM:
                default:
                    cvmx_dprintf("ERROR: cvmx_pko_config_port: "
                        "Command queue initialization failed.\n");
                    return(CVMX_PKO_CMD_QUEUE_INIT_ERROR);
                }
            }

            buf_ptr = (uint64_t*)cvmx_cmd_queue_buffer(
                CVMX_CMD_QUEUE_PKO(base_queue + queue));
            config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
        }
        else
            config.s.buf_ptr = 0;

        /* Make sure the command buffer is visible before the CSR writes. */
        CVMX_SYNCWS;

        if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
        {
            cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
        }
        cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
    }

    return result_code;
}
960
961 /**
962 * Rate limit a PKO port to a max packets/sec. This function is only
963 * supported on CN51XX and higher, excluding CN58XX.
964 *
965 * @param port Port to rate limit
966 * @param packets_s Maximum packet/sec
967 * @param burst Maximum number of packets to burst in a row before rate
968 * limiting cuts in.
969 *
970 * @return Zero on success, negative on failure
971 */
cvmx_pko_rate_limit_packets(int port,int packets_s,int burst)972 int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
973 {
974 cvmx_pko_mem_port_rate0_t pko_mem_port_rate0;
975 cvmx_pko_mem_port_rate1_t pko_mem_port_rate1;
976
977 pko_mem_port_rate0.u64 = 0;
978 pko_mem_port_rate0.s.pid = port;
979 pko_mem_port_rate0.s.rate_pkt =
980 cvmx_clock_get_rate(CVMX_CLOCK_SCLK) / packets_s / 16;
981 /* No cost per word since we are limited by packets/sec, not bits/sec */
982 pko_mem_port_rate0.s.rate_word = 0;
983
984 pko_mem_port_rate1.u64 = 0;
985 pko_mem_port_rate1.s.pid = port;
986 pko_mem_port_rate1.s.rate_lim =
987 ((uint64_t)pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
988
989 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
990 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
991 return 0;
992 }
993
994 /**
995 * Rate limit a PKO port to a max bits/sec. This function is only
996 * supported on CN51XX and higher, excluding CN58XX.
997 *
998 * @param port Port to rate limit
999 * @param bits_s PKO rate limit in bits/sec
1000 * @param burst Maximum number of bits to burst before rate
1001 * limiting cuts in.
1002 *
1003 * @return Zero on success, negative on failure
1004 */
cvmx_pko_rate_limit_bits(int port,uint64_t bits_s,int burst)1005 int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
1006 {
1007 cvmx_pko_mem_port_rate0_t pko_mem_port_rate0;
1008 cvmx_pko_mem_port_rate1_t pko_mem_port_rate1;
1009 uint64_t clock_rate = cvmx_clock_get_rate(CVMX_CLOCK_SCLK);
1010 uint64_t tokens_per_bit = clock_rate*16 / bits_s;
1011
1012 pko_mem_port_rate0.u64 = 0;
1013 pko_mem_port_rate0.s.pid = port;
1014 /* Each packet has a 12 bytes of interframe gap, an 8 byte preamble, and a
1015 4 byte CRC. These are not included in the per word count. Multiply
1016 by 8 to covert to bits and divide by 256 for limit granularity */
1017 pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
1018 /* Each 8 byte word has 64bits */
1019 pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
1020
1021 pko_mem_port_rate1.u64 = 0;
1022 pko_mem_port_rate1.s.pid = port;
1023 pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
1024
1025 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
1026 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
1027 return 0;
1028 }
1029
1030 #endif /* CVMX_ENABLE_PKO_FUNCTIONS */
1031