/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include "hinic_compat.h"
#include "hinic_csr.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_mgmt.h"
#include "hinic_pmd_eqs.h"

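/*
 * AEQ CTRL0/CTRL1 register layout, as implied by the masks and shifts
 * below:
 *   CTRL0: intr_idx [9:0], dma_attr [17:12], pci_intf_idx [21:20],
 *          intr_mode [31]
 *   CTRL1: len [20:0], elem_size [25:24], page_size [31:28]
 */
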
#define AEQ_CTRL_0_INTR_IDX_SHIFT		0
#define AEQ_CTRL_0_DMA_ATTR_SHIFT		12
#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT		20
#define AEQ_CTRL_0_INTR_MODE_SHIFT		31

#define AEQ_CTRL_0_INTR_IDX_MASK		0x3FFU
#define AEQ_CTRL_0_DMA_ATTR_MASK		0x3FU
#define AEQ_CTRL_0_PCI_INTF_IDX_MASK		0x3U
#define AEQ_CTRL_0_INTR_MODE_MASK		0x1U

#define AEQ_CTRL_0_SET(val, member)		\
				(((val) & AEQ_CTRL_0_##member##_MASK) << \
				AEQ_CTRL_0_##member##_SHIFT)

#define AEQ_CTRL_0_CLEAR(val, member)		\
				((val) & (~(AEQ_CTRL_0_##member##_MASK \
					<< AEQ_CTRL_0_##member##_SHIFT)))

#define AEQ_CTRL_1_LEN_SHIFT			0
#define AEQ_CTRL_1_ELEM_SIZE_SHIFT		24
#define AEQ_CTRL_1_PAGE_SIZE_SHIFT		28

#define AEQ_CTRL_1_LEN_MASK			0x1FFFFFU
#define AEQ_CTRL_1_ELEM_SIZE_MASK		0x3U
#define AEQ_CTRL_1_PAGE_SIZE_MASK		0xFU

#define AEQ_CTRL_1_SET(val, member)		\
				(((val) & AEQ_CTRL_1_##member##_MASK) << \
				AEQ_CTRL_1_##member##_SHIFT)

#define AEQ_CTRL_1_CLEAR(val, member)		\
				((val) & (~(AEQ_CTRL_1_##member##_MASK \
					<< AEQ_CTRL_1_##member##_SHIFT)))

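/*
 * Consumer index register layout, as implied by the masks and shifts
 * below: cons_idx [20:0] (bit 20 also carries the wrap flag, see
 * EQ_WRAPPED_SHIFT), xor_chksum [27:24], int_armed [31].
 */
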
#define EQ_CONS_IDX_CONS_IDX_SHIFT		0
#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT		24
#define EQ_CONS_IDX_INT_ARMED_SHIFT		31

#define EQ_CONS_IDX_CONS_IDX_MASK		0x1FFFFFU
#define EQ_CONS_IDX_XOR_CHKSUM_MASK		0xFU
#define EQ_CONS_IDX_INT_ARMED_MASK		0x1U

#define EQ_CONS_IDX_SET(val, member)		\
				(((val) & EQ_CONS_IDX_##member##_MASK) << \
				EQ_CONS_IDX_##member##_SHIFT)

#define EQ_CONS_IDX_CLEAR(val, member)		\
				((val) & (~(EQ_CONS_IDX_##member##_MASK \
					<< EQ_CONS_IDX_##member##_SHIFT)))

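/*
 * The wrapped flag is used in two places: shifted by EQ_VALID_SHIFT it
 * forms the init value written into each EQ element descriptor (see
 * aeq_elements_init()), and shifted by EQ_WRAPPED_SHIFT it is carried in
 * the consumer index value written back to hardware.
 */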
#define EQ_WRAPPED(eq)			((u32)(eq)->wrapped << EQ_VALID_SHIFT)

#define EQ_CONS_IDX(eq)		((eq)->cons_idx | \
				((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))

#define EQ_CONS_IDX_REG_ADDR(eq)	\
				(HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id))

#define EQ_PROD_IDX_REG_ADDR(eq)	\
				(HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id))

#define GET_EQ_NUM_PAGES(eq, size)		\
		((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) \
		/ (size)))

#define GET_EQ_NUM_ELEMS(eq, pg_size)	((pg_size) / (u32)(eq)->elem_size)
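
/*
 * Illustrative example (hypothetical values): with eq_len = 4096
 * elements of 64 bytes each and a 4 KB page size, GET_EQ_NUM_PAGES
 * yields ALIGN(4096 * 64, 4096) / 4096 = 64 pages and GET_EQ_NUM_ELEMS
 * yields 4096 / 64 = 64 elements per page.
 */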

#define PAGE_IN_4K(page_size)		((page_size) >> 12)
#define EQ_SET_HW_PAGE_SIZE_VAL(eq) ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))

#define ELEMENT_SIZE_IN_32B(eq)		(((eq)->elem_size) >> 5)
#define EQ_SET_HW_ELEM_SIZE_VAL(eq)	((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))
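
/*
 * The two macros above encode sizes the way the hardware expects them:
 * the page size as log2(page_size / 4 KB) and the element size as
 * log2(elem_size / 32 B). For example, a 4 KB page encodes to 0 and a
 * 64-byte element to 1.
 */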

#define AEQ_DMA_ATTR_DEFAULT			0

#define EQ_WRAPPED_SHIFT			20

#define EQ_VALID_SHIFT				31

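/*
 * Recover the parent hinic_aeqs from an embedded aeq: stepping the
 * element pointer back by q_id entries lands on aeq[0], from which
 * container_of() reaches the enclosing structure.
 */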
#define aeq_to_aeqs(eq) \
		container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])

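/*
 * Fold a 32-bit register value into a 4-bit XOR checksum over its eight
 * nibbles. For example (hypothetical value), 0x12345678 checksums to
 * 1 ^ 2 ^ 3 ^ 4 ^ 5 ^ 6 ^ 7 ^ 8 = 0x8.
 */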
static u8 eq_cons_idx_checksum_set(u32 val)
{
	u8 checksum = 0;
	u8 idx;

	for (idx = 0; idx < 32; idx += 4)
		checksum ^= ((val >> idx) & 0xF);

	return (checksum & 0xF);
}

/**
 * set_eq_cons_idx - write the cons idx to hardware
 * @eq: the event queue to update the cons idx for
 * @arm_state: whether to arm the interrupt when a new eq element is generated
 */
static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
{
	u32 eq_cons_idx, eq_wrap_ci, val;
	u32 addr = EQ_CONS_IDX_REG_ADDR(eq);

	eq_wrap_ci = EQ_CONS_IDX(eq);

	/* Read-modify-write */
	val = hinic_hwif_read_reg(eq->hwdev->hwif, addr);

	val = EQ_CONS_IDX_CLEAR(val, CONS_IDX) &
		EQ_CONS_IDX_CLEAR(val, INT_ARMED) &
		EQ_CONS_IDX_CLEAR(val, XOR_CHKSUM);

	/* Only aeq0 uses the int_arm mode, so the PMD driver can receive
	 * asynchronous events and mailbox data
	 */
	if (eq->q_id == 0)
		eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
			EQ_CONS_IDX_SET(arm_state, INT_ARMED);
	else
		eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
			EQ_CONS_IDX_SET(HINIC_EQ_NOT_ARMED, INT_ARMED);

	val |= eq_cons_idx;

	val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);

	hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
}

/**
 * eq_update_ci - update the cons idx of event queue
 * @eq: the event queue to update the cons idx for
 */
void eq_update_ci(struct hinic_eq *eq)
{
	set_eq_cons_idx(eq, HINIC_EQ_ARMED);
}

/**
 * set_aeq_ctrls - set the ctrl registers of the aeq
 * @eq: the event queue to configure
 */
static void set_aeq_ctrls(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwdev->hwif;
	struct irq_info *eq_irq = &eq->eq_irq;
	u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
	u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);

	/* set ctrl0 */
	addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);

	val = hinic_hwif_read_reg(hwif, addr);

	val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
		AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
		AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
		AEQ_CTRL_0_CLEAR(val, INTR_MODE);

	ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
		AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR)	|
		AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)	|
		AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);

	val |= ctrl0;

	hinic_hwif_write_reg(hwif, addr, val);

	/* set ctrl1 */
	addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);

	page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
	elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);

	ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN)		|
		AEQ_CTRL_1_SET(elem_size, ELEM_SIZE)	|
		AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

	hinic_hwif_write_reg(hwif, addr, ctrl1);
}

/**
 * aeq_elements_init - initialize all the elements in the aeq
 * @eq: the event queue
 * @init_val: value with which to initialize the elements
 */
static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	struct hinic_aeq_elem *aeqe;
	u16 i;

	for (i = 0; i < eq->eq_len; i++) {
		aeqe = GET_AEQ_ELEM(eq, i);
		aeqe->desc = cpu_to_be32(init_val);
	}

	rte_wmb();	/* Write the init values */
}

/**
 * alloc_eq_pages - allocate the pages for the queue
 * @eq: the event queue
 */
static int alloc_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwdev->hwif;
	u32 init_val;
	u64 dma_addr_size, virt_addr_size;
	u16 pg_num, i;
	int err;

	dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
	virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);

	eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
	if (!eq->dma_addr) {
		PMD_DRV_LOG(ERR, "Allocate dma addr array failed");
		return -ENOMEM;
	}

	eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
	if (!eq->virt_addr) {
		PMD_DRV_LOG(ERR, "Allocate virt addr array failed");
		err = -ENOMEM;
		goto virt_addr_alloc_err;
	}

	for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
		eq->virt_addr[pg_num] =
			(u8 *)dma_zalloc_coherent_aligned(eq->hwdev,
					eq->page_size, &eq->dma_addr[pg_num],
					SOCKET_ID_ANY);
		if (!eq->virt_addr[pg_num]) {
			err = -ENOMEM;
			goto dma_alloc_err;
		}

		hinic_hwif_write_reg(hwif,
				     HINIC_EQ_HI_PHYS_ADDR_REG(eq->type,
				     eq->q_id, pg_num),
				     upper_32_bits(eq->dma_addr[pg_num]));

		hinic_hwif_write_reg(hwif,
				     HINIC_EQ_LO_PHYS_ADDR_REG(eq->type,
				     eq->q_id, pg_num),
				     lower_32_bits(eq->dma_addr[pg_num]));
	}

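	/*
	 * Stamp every element with the current wrapped/valid bit so that
	 * entries left over from before this (re)initialization are not
	 * mistaken for new events.
	 */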
	init_val = EQ_WRAPPED(eq);

	aeq_elements_init(eq, init_val);

	return 0;

dma_alloc_err:
	for (i = 0; i < pg_num; i++)
		dma_free_coherent(eq->hwdev, eq->page_size,
				  eq->virt_addr[i], eq->dma_addr[i]);

	kfree(eq->virt_addr);

virt_addr_alloc_err:
	kfree(eq->dma_addr);
	return err;
}

/**
 * free_eq_pages - free the pages of the queue
 * @eq: the event queue
 */
static void free_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwdev *hwdev = eq->hwdev;
	u16 pg_num;

	for (pg_num = 0; pg_num < eq->num_pages; pg_num++)
		dma_free_coherent(hwdev, eq->page_size,
				  eq->virt_addr[pg_num],
				  eq->dma_addr[pg_num]);

	kfree(eq->virt_addr);
	kfree(eq->dma_addr);
}

#define MSIX_ENTRY_IDX_0 (0)

/**
 * init_aeq - initialize aeq
 * @eq: the event queue
 * @hwdev: the pointer to the private hardware device object
 * @q_id: queue id number
 * @q_len: the number of EQ elements
 * @page_size: the page size of the event queue
 * @entry: msix entry associated with the event queue
 * Return: 0 - Success, Negative - failure
 */
static int init_aeq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
		   u16 q_len, u32 page_size,
		   __rte_unused struct irq_info *entry)
{
	int err = 0;

	eq->hwdev = hwdev;
	eq->q_id = q_id;
	eq->type = HINIC_AEQ;
	eq->page_size = page_size;
	eq->eq_len = q_len;

	/* clear eq_len to force eqe drop in hardware */
	hinic_hwif_write_reg(eq->hwdev->hwif,
			     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);

	/* Clear PI and CI, also clear the ARM bit */
	hinic_hwif_write_reg(eq->hwdev->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
	hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);

	eq->cons_idx = 0;
	eq->wrapped = 0;

	eq->elem_size = HINIC_AEQE_SIZE;
	eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
	eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size);

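	/* n & (n - 1) is nonzero exactly when n is not a power of two */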
	if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
		PMD_DRV_LOG(ERR, "Number of elements in eq page is not a power of 2");
		return -EINVAL;
	}

	if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
		PMD_DRV_LOG(ERR, "Too many pages for eq, num_pages: %d",
			eq->num_pages);
		return -EINVAL;
	}

	err = alloc_eq_pages(eq);
	if (err) {
		PMD_DRV_LOG(ERR, "Allocate pages for eq failed");
		return err;
	}

	/* pmd uses MSIX_ENTRY_IDX_0 */
	eq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0;

	set_aeq_ctrls(eq);
	set_eq_cons_idx(eq, HINIC_EQ_ARMED);

	if (eq->q_id == 0)
		hinic_set_msix_state(hwdev, 0, HINIC_MSIX_ENABLE);

	eq->poll_retry_nr = HINIC_RETRY_NUM;

	return 0;
}

/**
 * remove_aeq - remove aeq
 * @eq: the event queue
 */
static void remove_aeq(struct hinic_eq *eq)
{
	struct irq_info *entry = &eq->eq_irq;

	if (eq->q_id == 0)
		hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
				     HINIC_MSIX_DISABLE);

	/* clear eq_len to avoid hw access host memory */
	hinic_hwif_write_reg(eq->hwdev->hwif,
			     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);

	/* update cons_idx to avoid invalid interrupt */
	eq->cons_idx = (u16)hinic_hwif_read_reg(eq->hwdev->hwif,
						EQ_PROD_IDX_REG_ADDR(eq));
	set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);

	free_eq_pages(eq);
}

/**
 * hinic_aeqs_init - init all the aeqs
 * @hwdev: the pointer to the private hardware device object
 * @num_aeqs: number of aeqs
 * @msix_entries: msix entries associated with the event queues
 * Return: 0 - Success, Negative - failure
 */
static int
hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
		struct irq_info *msix_entries)
{
	struct hinic_aeqs *aeqs;
	int err;
	u16 i, q_id;

	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
	if (!aeqs)
		return -ENOMEM;

	hwdev->aeqs = aeqs;
	aeqs->hwdev = hwdev;
	aeqs->num_aeqs = num_aeqs;

	for (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) {
		err = init_aeq(&aeqs->aeq[q_id], hwdev, q_id,
			      HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE,
			      &msix_entries[q_id]);
		if (err) {
			PMD_DRV_LOG(ERR, "Init aeq %d failed", q_id);
			goto init_aeq_err;
		}
	}

	return 0;

init_aeq_err:
	for (i = HINIC_AEQN_START; i < q_id; i++)
		remove_aeq(&aeqs->aeq[i]);

	kfree(aeqs);

	return err;
}

/**
 * hinic_aeqs_free - free all the aeqs
 * @hwdev: the pointer to the private hardware device object
 */
static void hinic_aeqs_free(struct hinic_hwdev *hwdev)
{
	struct hinic_aeqs *aeqs = hwdev->aeqs;
	u16 q_id;

	/* hinic pmd uses aeq[1~3]; aeq[0] is used in the kernel driver only */
	for (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs; q_id++)
		remove_aeq(&aeqs->aeq[q_id]);

	kfree(aeqs);
}

void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
{
	struct hinic_eq *eq;
	u32 addr, ci, pi;
	int q_id;

	for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
		eq = &hwdev->aeqs->aeq[q_id];
		addr = EQ_CONS_IDX_REG_ADDR(eq);
		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
		addr = EQ_PROD_IDX_REG_ADDR(eq);
		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
		PMD_DRV_LOG(ERR, "aeq id: %d, ci: 0x%x, pi: 0x%x",
			q_id, ci, pi);
	}
}

int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev)
{
	int rc;
	u16 num_aeqs;
	struct irq_info aeq_irqs[HINIC_MAX_AEQS];

	num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
	if (num_aeqs < HINIC_MIN_AEQS) {
		PMD_DRV_LOG(ERR, "PMD needs %d AEQs, chip has %d",
				HINIC_MIN_AEQS, num_aeqs);
		return -EINVAL;
	}

	memset(aeq_irqs, 0, sizeof(aeq_irqs));
	rc = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
	if (rc != HINIC_OK)
		PMD_DRV_LOG(ERR, "Initialize aeqs failed, rc: %d", rc);

	return rc;
}

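/*
 * A minimal usage sketch (illustrative only; the actual call sites live
 * elsewhere in the PMD): hinic_comm_aeqs_init() is expected to run once
 * during device bring-up, after the hwif is ready, and to be paired with
 * hinic_comm_aeqs_free() below on teardown.
 */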
void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev)
{
	hinic_aeqs_free(hwdev);
}