/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_byteorder.h>

#include "nitrox_hal.h"
#include "nitrox_csr.h"

#define MAX_VF_QUEUES	8
#define MAX_PF_QUEUES	64
#define NITROX_TIMER_THOLD	0x3FFFFF
#define NITROX_COUNT_THOLD      0xFFFFFFFF
17 
18 void
nps_pkt_input_ring_disable(uint8_t * bar_addr,uint16_t ring)19 nps_pkt_input_ring_disable(uint8_t *bar_addr, uint16_t ring)
20 {
21 	union nps_pkt_in_instr_ctl pkt_in_instr_ctl;
22 	uint64_t reg_addr;
23 	int max_retries = 5;
24 
25 	reg_addr = NPS_PKT_IN_INSTR_CTLX(ring);
26 	pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
27 	pkt_in_instr_ctl.s.enb = 0;
28 	nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_ctl.u64);
29 	rte_delay_us_block(100);
30 
31 	/* wait for enable bit to be cleared */
32 	pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
33 	while (pkt_in_instr_ctl.s.enb && max_retries--) {
34 		rte_delay_ms(10);
35 		pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
36 	}
37 }
38 
39 void
nps_pkt_solicited_port_disable(uint8_t * bar_addr,uint16_t port)40 nps_pkt_solicited_port_disable(uint8_t *bar_addr, uint16_t port)
41 {
42 	union nps_pkt_slc_ctl pkt_slc_ctl;
43 	uint64_t reg_addr;
44 	int max_retries = 5;
45 
46 	/* clear enable bit */
47 	reg_addr = NPS_PKT_SLC_CTLX(port);
48 	pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
49 	pkt_slc_ctl.s.enb = 0;
50 	nitrox_write_csr(bar_addr, reg_addr, pkt_slc_ctl.u64);
51 	rte_delay_us_block(100);
52 
53 	pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
54 	while (pkt_slc_ctl.s.enb && max_retries--) {
55 		rte_delay_ms(10);
56 		pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
57 	}
58 }
59 
60 void
setup_nps_pkt_input_ring(uint8_t * bar_addr,uint16_t ring,uint32_t rsize,phys_addr_t raddr)61 setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
62 			 phys_addr_t raddr)
63 {
64 	union nps_pkt_in_instr_ctl pkt_in_instr_ctl;
65 	union nps_pkt_in_instr_rsize pkt_in_instr_rsize;
66 	union nps_pkt_in_instr_baoff_dbell pkt_in_instr_baoff_dbell;
67 	union nps_pkt_in_done_cnts pkt_in_done_cnts;
68 	uint64_t base_addr, reg_addr;
69 	int max_retries = 5;
70 
71 	nps_pkt_input_ring_disable(bar_addr, ring);
72 
73 	/* write base address */
74 	reg_addr = NPS_PKT_IN_INSTR_BADDRX(ring);
75 	base_addr = raddr;
76 	nitrox_write_csr(bar_addr, reg_addr, base_addr);
77 	rte_delay_us_block(CSR_DELAY);
78 
79 	/* write ring size */
80 	reg_addr = NPS_PKT_IN_INSTR_RSIZEX(ring);
81 	pkt_in_instr_rsize.u64 = 0;
82 	pkt_in_instr_rsize.s.rsize = rsize;
83 	nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_rsize.u64);
84 	rte_delay_us_block(CSR_DELAY);
85 
86 	/* clear door bell */
87 	reg_addr = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
88 	pkt_in_instr_baoff_dbell.u64 = 0;
89 	pkt_in_instr_baoff_dbell.s.dbell = 0xFFFFFFFF;
90 	nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_baoff_dbell.u64);
91 	rte_delay_us_block(CSR_DELAY);
92 
93 	/* clear done count */
94 	reg_addr = NPS_PKT_IN_DONE_CNTSX(ring);
95 	pkt_in_done_cnts.u64 = nitrox_read_csr(bar_addr, reg_addr);
96 	nitrox_write_csr(bar_addr, reg_addr, pkt_in_done_cnts.u64);
97 	rte_delay_us_block(CSR_DELAY);
98 
99 	/* Setup PKT IN RING Interrupt Threshold */
100 	reg_addr = NPS_PKT_IN_INT_LEVELSX(ring);
101 	nitrox_write_csr(bar_addr, reg_addr, 0xFFFFFFFF);
102 	rte_delay_us_block(CSR_DELAY);
103 
104 	/* enable ring */
105 	reg_addr = NPS_PKT_IN_INSTR_CTLX(ring);
106 	pkt_in_instr_ctl.u64 = 0;
107 	pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
108 	pkt_in_instr_ctl.s.is64b = 1;
109 	pkt_in_instr_ctl.s.enb = 1;
110 	nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_ctl.u64);
111 	rte_delay_us_block(100);
112 
113 	pkt_in_instr_ctl.u64 = 0;
114 	pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
115 	/* wait for ring to be enabled */
116 	while (!pkt_in_instr_ctl.s.enb && max_retries--) {
117 		rte_delay_ms(10);
118 		pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
119 	}
120 }
121 
/*
 * Configure and enable an NPS solicited output (SLC) port.
 *
 * bar_addr: mapped BAR base used by nitrox_read_csr()/nitrox_write_csr()
 * port:     solicited output port index
 *
 * Sequence: disable the port, clear the packet counters, program the
 * interrupt coalescing levels (timer + count thresholds), then re-enable
 * the port. The final enable is confirmed with a bounded poll (~5 x
 * 10 ms); no error is reported on timeout.
 */
void
setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	union nps_pkt_slc_int_levels pkt_slc_int_levels;
	uint64_t reg_addr;
	int max_retries = 5;

	nps_pkt_solicited_port_disable(bar_addr, port);

	/*
	 * clear pkt counts; writing the read value back presumably clears
	 * write-1-to-clear count bits - confirm against the Nitrox CSR
	 * specification.
	 */
	reg_addr = NPS_PKT_SLC_CNTSX(port);
	pkt_slc_cnts.u64 = nitrox_read_csr(bar_addr, reg_addr);
	nitrox_write_csr(bar_addr, reg_addr, pkt_slc_cnts.u64);
	rte_delay_us_block(CSR_DELAY);

	/* slc interrupt levels: timer threshold and packet-count threshold */
	reg_addr = NPS_PKT_SLC_INT_LEVELSX(port);
	pkt_slc_int_levels.u64 = 0;
	pkt_slc_int_levels.s.bmode = 0;
	pkt_slc_int_levels.s.timet = NITROX_TIMER_THOLD;

	/*
	 * Compile-time guard: always true for the current
	 * NITROX_COUNT_THOLD (0xFFFFFFFF); protects the cnt - 1 below if
	 * the threshold is ever configured to 0.
	 */
	if (NITROX_COUNT_THOLD > 0)
		pkt_slc_int_levels.s.cnt = NITROX_COUNT_THOLD - 1;

	nitrox_write_csr(bar_addr, reg_addr, pkt_slc_int_levels.u64);
	rte_delay_us_block(CSR_DELAY);

	/* enable ring (rh/z flag semantics per Nitrox CSR spec) */
	reg_addr = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	pkt_slc_ctl.s.rh = 1;
	pkt_slc_ctl.s.z = 1;
	pkt_slc_ctl.s.enb = 1;
	nitrox_write_csr(bar_addr, reg_addr, pkt_slc_ctl.u64);
	rte_delay_us_block(100);

	/* bounded wait for the enable bit to take effect */
	pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	while (!pkt_slc_ctl.s.enb && max_retries--) {
		rte_delay_ms(10);
		pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
	}
}
166 
/*
 * Detect the VF configuration mode by probing AQMQ queue-size register
 * aliasing.
 *
 * A marker value is written to AQMQ_QSZX(0); the mode determines how
 * many queue-size registers alias the same hardware register, so the
 * marker reappears at the first aliased index q. q maps to the mode
 * (1 -> VF128, 2 -> VF64, 4 -> VF32, 8 -> VF16); any other stride
 * returns 0.
 *
 * NOTE(review): the marker is left in AQMQ_QSZX(0) on return - callers
 * presumably reprogram the queue size afterwards; confirm. Also assumes
 * host_queue_size is wide enough that 0xDEADBEEF round-trips without
 * truncation - verify against the union definition in nitrox_hal.h.
 */
int
vf_get_vf_config_mode(uint8_t *bar_addr)
{
	union aqmq_qsz aqmq_qsz;
	uint64_t reg_addr;
	int q, vf_mode;

	/* Plant the marker in queue 0's size register. */
	aqmq_qsz.u64 = 0;
	aqmq_qsz.s.host_queue_size = 0xDEADBEEF;
	reg_addr = AQMQ_QSZX(0);
	nitrox_write_csr(bar_addr, reg_addr, aqmq_qsz.u64);
	rte_delay_us_block(CSR_DELAY);

	/* Find the first queue index whose register aliases queue 0. */
	aqmq_qsz.u64 = 0;
	for (q = 1; q < MAX_VF_QUEUES; q++) {
		reg_addr = AQMQ_QSZX(q);
		aqmq_qsz.u64 = nitrox_read_csr(bar_addr, reg_addr);
		if (aqmq_qsz.s.host_queue_size == 0xDEADBEEF)
			break;
	}

	/* Aliasing stride -> VF mode (q == MAX_VF_QUEUES if no alias found). */
	switch (q) {
	case 1:
		vf_mode = NITROX_MODE_VF128;
		break;
	case 2:
		vf_mode = NITROX_MODE_VF64;
		break;
	case 4:
		vf_mode = NITROX_MODE_VF32;
		break;
	case 8:
		vf_mode = NITROX_MODE_VF16;
		break;
	default:
		vf_mode = 0;
		break;
	}

	return vf_mode;
}
208 
209 int
vf_config_mode_to_nr_queues(enum nitrox_vf_mode vf_mode)210 vf_config_mode_to_nr_queues(enum nitrox_vf_mode vf_mode)
211 {
212 	int nr_queues;
213 
214 	switch (vf_mode) {
215 	case NITROX_MODE_PF:
216 		nr_queues = MAX_PF_QUEUES;
217 		break;
218 	case NITROX_MODE_VF16:
219 		nr_queues = 8;
220 		break;
221 	case NITROX_MODE_VF32:
222 		nr_queues = 4;
223 		break;
224 	case NITROX_MODE_VF64:
225 		nr_queues = 2;
226 		break;
227 	case NITROX_MODE_VF128:
228 		nr_queues = 1;
229 		break;
230 	default:
231 		nr_queues = 0;
232 		break;
233 	}
234 
235 	return nr_queues;
236 }
237