/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 sd_index,
					      enum i40e_sd_entry_type type,
					      u64 direct_mode_sz)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	enum i40e_memory_type mem_type;
	bool dma_mem_alloc_done = false;
	struct i40e_dma_mem mem;
	u64 alloc_len;

	if (NULL == hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (I40E_SD_TYPE_PAGED == type) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (I40E_SD_TYPE_PAGED == type) {
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
		} else {
			i40e_memcpy(&sd_entry->u.bp.addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (I40E_SUCCESS != ret_code)
		if (dma_mem_alloc_done)
			i40e_free_dma_mem(hw, &mem);

	return ret_code;
}

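/*
 * Usage sketch (illustrative only, not part of the upstream driver): back an
 * HMC object with a direct-mode segment descriptor.  The wrapper name is an
 * assumption for this example; a real caller such as
 * i40e_create_lan_hmc_object() derives the index and size from the object
 * layout.
 */
static enum i40e_status_code
i40e_example_add_direct_sd(struct i40e_hw *hw,
			   struct i40e_hmc_info *hmc_info, u32 sd_index)
{
	/* a direct SD is served by a single 2M backing page */
	return i40e_add_sd_table_entry(hw, hmc_info, sd_index,
				       I40E_SD_TYPE_DIRECT,
				       I40E_HMC_DIRECT_BP_SIZE);
}
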
/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous,
 *	   aligned on a 4K boundary and zeroed.
 *	2. It should be 4K in size.
 **/
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 pd_index,
					      struct i40e_dma_mem *rsrc_pg)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	struct i40e_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (I40E_SD_TYPE_PAGED !=
	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			/* allocate a 4K backing page */
			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
						I40E_HMC_PAGED_BP_SIZE,
						I40E_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				goto exit;
			pd_entry->rsrc_pg = false;
		}

		i40e_memcpy(&pd_entry->bp.addr, page,
			    sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
			    I40E_NONDMA_TO_DMA);

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}

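/*
 * Usage sketch (illustrative only, not part of the upstream driver): populate
 * all page descriptors behind one paged segment descriptor.  It assumes the
 * SD at sd_index was added with I40E_SD_TYPE_PAGED; passing NULL as rsrc_pg
 * lets i40e_add_pd_table_entry() allocate each 4K backing page itself.
 */
static enum i40e_status_code
i40e_example_fill_paged_sd(struct i40e_hw *hw,
			   struct i40e_hmc_info *hmc_info, u32 sd_index)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 i;

	for (i = 0; i < I40E_HMC_PD_CNT_IN_SD; i++) {
		/* pd_index is global, i.e. 512 page descriptors per SD */
		ret_code = i40e_add_pd_table_entry(hw, hmc_info,
				sd_index * I40E_HMC_PD_CNT_IN_SD + i, NULL);
		if (ret_code)
			break;
	}
	return ret_code;
}
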
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (I40E_SUCCESS != ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}

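/*
 * Usage sketch (illustrative only, not part of the upstream driver): release
 * the backing pages of a fully populated paged SD.  i40e_remove_pd_bp()
 * derives sd_idx = idx / 512 and rel_pd_idx = idx % 512 from the global page
 * index itself, so the caller just walks the range; e.g. idx 1000 maps to
 * sd_idx 1, rel_pd_idx 488.
 */
static void i40e_example_drain_paged_sd(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 sd_index)
{
	u32 i;

	for (i = 0; i < I40E_HMC_PD_CNT_IN_SD; i++)
		(void)i40e_remove_pd_bp(hw, hmc_info,
					sd_index * I40E_HMC_PD_CNT_IN_SD + i);
}
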
/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from an sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
					     u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = false;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* get the entry; its ref count was already dropped by
	 * i40e_prep_remove_sd_bp()
	 */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}

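/*
 * Usage sketch (illustrative only, not part of the upstream driver): the
 * two-step teardown of a direct SD, mirroring i40e_delete_lan_hmc_object().
 * The prep call drops the backing-page refcount and succeeds only once it
 * reaches zero; the hardware clear and the DMA free happen in the second
 * step, which is PF-only (is_pf must be true).
 */
static void i40e_example_remove_direct_sd(struct i40e_hw *hw,
					  struct i40e_hmc_info *hmc_info,
					  u32 sd_index)
{
	if (i40e_prep_remove_sd_bp(hmc_info, sd_index) == I40E_SUCCESS)
		(void)i40e_remove_sd_bp_new(hw, hmc_info, sd_index, true);
}
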
/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from an sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = false;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from an sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
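
/*
 * Usage sketch (illustrative only, not part of the upstream driver): the
 * matching two-step teardown of a paged SD.  Every page descriptor behind the
 * SD must have been removed first, otherwise the pd_table refcount is nonzero
 * and i40e_prep_remove_pd_page() returns I40E_ERR_NOT_READY.
 */
static void i40e_example_remove_paged_sd(struct i40e_hw *hw,
					 struct i40e_hmc_info *hmc_info,
					 u32 sd_index)
{
	if (i40e_prep_remove_pd_page(hmc_info, sd_index) == I40E_SUCCESS)
		(void)i40e_remove_pd_page_new(hw, hmc_info, sd_index, true);
}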