/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

/* dma_addr_t manip */
#define DMA_LO_LE(x)		cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x)		cpu_to_le32(upper_32_bits(x))

#define HILO_GEN(hi, lo, type)	((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo)	HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo)		HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
#define HILO_DMA_REGPAIR(regpair)	(HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair)	(HILO_64(regpair.hi, regpair.lo))

enum qed_chain_mode {
	/* Each page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page, so a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};

struct qed_chain_next {
	struct regpair	next_phys;
	void		*next_virt;
};

struct qed_chain_pbl {
	dma_addr_t	p_phys_table;
	void		*p_virt_table;
	u16		prod_page_idx;
	u16		cons_page_idx;
};

struct qed_chain {
	void			*p_virt_addr;
	dma_addr_t		p_phys_addr;
	void			*p_prod_elem;
	void			*p_cons_elem;
	u16			page_cnt;
	enum qed_chain_mode	mode;
	enum qed_chain_use_mode	intended_use; /* used to produce/consume */
	u16			capacity; /* number of _usable_ elements */
	u16			size; /* number of elements */
	u16			prod_idx;
	u16			cons_idx;
	u16			elem_per_page;
	u16			elem_per_page_mask;
	u16			elem_unusable;
	u16			usable_per_page;
	u16			elem_size;
	u16			next_page_mask;
	struct qed_chain_pbl	pbl;
};

#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
#define QED_CHAIN_PAGE_SIZE		(0x1000)
#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	\
	((mode == QED_CHAIN_MODE_NEXT_PTR) ?		\
	 (1 + ((sizeof(struct qed_chain_next) - 1) /	\
	       (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode)		\
	((u32)(ELEMS_PER_PAGE(elem_size) -		\
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode)	\
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
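
/* Illustrative sketch (not part of the original header): how the sizing
 * macros above compose. For 8-byte elements on a 64-bit build,
 * sizeof(struct qed_chain_next) is 16, so a NEXT_PTR page sacrifices
 * 1 + (16 - 1) / 8 = 2 element slots for the link and keeps 510 of its
 * 512 slots usable, while a PBL or SINGLE page keeps all 512. A
 * 1024-element chain therefore needs DIV_ROUND_UP(1024, 510) = 3 pages
 * with next pointers, but only 2 pages with a PBL. The helper name below
 * is hypothetical.
 */
static inline u32 qed_chain_example_page_cnt(enum qed_chain_mode mode)
{
	return QED_CHAIN_PAGE_CNT(1024, 8, mode);
}
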
/* Accessors */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
	return p_chain->prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
	return p_chain->cons_idx;
}

static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	/* we don't need to truncate upon assignment, as we assign u32 -> u16 */
	used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
		(u32)p_chain->cons_idx;
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= (used / p_chain->elem_per_page);

	return p_chain->capacity - used;
}

static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
{
	return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
}

static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
{
	return qed_chain_get_elem_left(p_chain) == 0;
}

static inline u16 qed_chain_get_elem_per_page(
	struct qed_chain *p_chain)
{
	return p_chain->elem_per_page;
}

static inline u16 qed_chain_get_usable_per_page(
	struct qed_chain *p_chain)
{
	return p_chain->usable_per_page;
}

static inline u16 qed_chain_get_unusable_per_page(
	struct qed_chain *p_chain)
{
	return p_chain->elem_unusable;
}

static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
{
	return p_chain->size;
}

static inline dma_addr_t
qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
	return p_chain->pbl.p_phys_table;
}

/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem,
		       u16 *idx_to_inc,
		       u16 *page_to_inc)
{
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
	{
		struct qed_chain_next *p_next = *p_next_elem;

		*p_next_elem = p_next->next_virt;
		*idx_to_inc += p_chain->elem_unusable;
		break;
	}
	case QED_CHAIN_MODE_SINGLE:
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		/* It is assumed pages are sequential; the next element only
		 * needs to change when wrapping from the last page back to
		 * the first.
		 */
		if (++(*page_to_inc) == p_chain->page_cnt) {
			*page_to_inc = 0;
			*p_next_elem = p_chain->p_virt_addr;
		}
	}
}

#define is_unusable_idx(p, idx)	\
	(((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx) \
	((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define test_ans_skip(p, idx)				\
	do {						\
		if (is_unusable_idx(p, idx)) {		\
			(p)->idx += (p)->elem_unusable;	\
		}					\
	} while (0)
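
/* Illustrative sketch (not part of the original header): a driver-side
 * helper (hypothetical name) that checks whether a chain still has room
 * for `num` new elements before producing them. The prod/cons indices
 * above are free-running u16 values, so qed_chain_get_elem_left()
 * effectively computes the outstanding count modulo 0x10000: e.g.
 * prod_idx = 0x0005 after a wrap and cons_idx = 0xfffe give
 * (0x10000 + 0x0005) - 0xfffe = 7 elements in use.
 */
static inline bool qed_chain_example_has_room(struct qed_chain *p_chain,
					      u16 num)
{
	return qed_chain_get_elem_left(p_chain) >= num;
}
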
/**
 * @brief qed_chain_return_multi_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previously produced elements are now consumed.
 *
 * @param p_chain
 * @param num
 */
static inline void
qed_chain_return_multi_produced(struct qed_chain *p_chain,
				u16 num)
{
	p_chain->cons_idx += num;
	test_ans_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previously produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	p_chain->cons_idx++;
	test_ans_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the
 * driver's responsibility to validate that the chain has room for a new
 * element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *ret = NULL;

	if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
	    p_chain->next_page_mask) {
		qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
				       &p_chain->prod_idx,
				       &p_chain->pbl.prod_page_idx);
	}

	ret = p_chain->p_prod_elem;
	p_chain->prod_idx++;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return ret;
}

/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in chain
 *
 * @param p_chain
 *
 * @return u16, the chain's capacity (number of usable elements)
 */
static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}

/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments producers so they could be written to FW.
 *
 * @param p_chain
 */
static inline void
qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	test_ans_skip(p_chain, prod_idx);
	p_chain->prod_idx++;
}
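
/* Illustrative sketch (not part of the original header): a Tx-style produce
 * path. The BD layout and function name are hypothetical; a real driver
 * would also ring a doorbell with the new producer index afterwards.
 */
struct qed_chain_example_bd {
	struct regpair	addr;
	__le16		length;
	__le16		flags;
};

static inline bool qed_chain_example_post(struct qed_chain *p_chain,
					  dma_addr_t buf_phys, u16 len)
{
	struct qed_chain_example_bd *p_bd;

	/* qed_chain_produce() does not check for room itself */
	if (!qed_chain_get_elem_left(p_chain))
		return false;

	p_bd = qed_chain_produce(p_chain);
	p_bd->addr.lo = DMA_LO_LE(buf_phys);
	p_bd->addr.hi = DMA_HI_LE(buf_phys);
	p_bd->length = cpu_to_le16(len);
	p_bd->flags = 0;

	return true;
}
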
/**
 * @brief qed_chain_consume -
 *
 * A Chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *ret = NULL;

	if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
	    p_chain->next_page_mask) {
		qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
				       &p_chain->cons_idx,
				       &p_chain->pbl.cons_page_idx);
	}

	ret = p_chain->p_cons_elem;
	p_chain->cons_idx++;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return ret;
}

/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	int i;

	p_chain->prod_idx = 0;
	p_chain->cons_idx = 0;
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
		p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
		/* Do nothing */
		break;

	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;
	}
}
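
/* Illustrative sketch (not part of the original header): an Rx-style
 * completion path. The caller is assumed to know from the firmware's
 * consumer index (e.g. in the status block) that a completion is pending;
 * the slot is recycled right away so the ring stays populated for FW.
 * The function name is hypothetical.
 */
static inline void *qed_chain_example_reap(struct qed_chain *p_chain)
{
	void *p_elem = qed_chain_consume(p_chain);

	/* Hand the slot straight back to the producer side */
	qed_chain_recycle_consumed(p_chain);

	return p_elem;
}
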
/**
 * @brief qed_chain_init - Initializes a basic chain struct
 *
 * @param p_chain
 * @param p_virt_addr
 * @param p_phys_addr	physical address of allocated buffer's beginning
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use
 * @param mode
 */
static inline void qed_chain_init(struct qed_chain *p_chain,
				  void *p_virt_addr,
				  dma_addr_t p_phys_addr,
				  u16 page_cnt,
				  u8 elem_size,
				  enum qed_chain_use_mode intended_use,
				  enum qed_chain_mode mode)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = p_virt_addr;
	p_chain->p_phys_addr = p_phys_addr;
	p_chain->elem_size = elem_size;
	p_chain->page_cnt = page_cnt;
	p_chain->mode = mode;

	p_chain->intended_use = intended_use;
	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page =
		USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;

	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);

	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	if (mode == QED_CHAIN_MODE_NEXT_PTR) {
		struct qed_chain_next *p_next;
		u16 i;

		for (i = 0; i < page_cnt - 1; i++) {
			/* Advance the physical address to the next page. */
			p_phys_addr += QED_CHAIN_PAGE_SIZE;

			/* Initialize the physical address of the next page. */
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   elem_size *
							   p_chain->usable_per_page);

			p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
			p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);

			/* Initialize the virtual address of the next page. */
			p_next->next_virt = (void *)((u8 *)p_virt_addr +
						     QED_CHAIN_PAGE_SIZE);

			/* Move to the next page. */
			p_virt_addr = p_next->next_virt;
		}

		/* Last page's next should point to beginning of the chain */
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
						   elem_size *
						   p_chain->usable_per_page);

		p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
		p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
		p_next->next_virt = p_chain->p_virt_addr;
	}
	qed_chain_reset(p_chain);
}

/**
 * @brief qed_chain_pbl_init - Initializes a basic pbl chain struct
 *
 * @param p_chain
 * @param p_virt_addr	virtual address of allocated buffer's beginning
 * @param p_phys_addr	physical address of allocated buffer's beginning
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param use_mode
 * @param p_phys_pbl	pointer to a pre-allocated side table
 *			which will hold physical page addresses.
 * @param p_virt_pbl	pointer to a pre-allocated side table
 *			which will hold virtual page addresses.
 */
static inline void
qed_chain_pbl_init(struct qed_chain *p_chain,
		   void *p_virt_addr,
		   dma_addr_t p_phys_addr,
		   u16 page_cnt,
		   u8 elem_size,
		   enum qed_chain_use_mode use_mode,
		   dma_addr_t p_phys_pbl,
		   dma_addr_t *p_virt_pbl)
{
	dma_addr_t *p_pbl_dma = p_virt_pbl;
	int i;

	qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
		       elem_size, use_mode, QED_CHAIN_MODE_PBL);

	p_chain->pbl.p_phys_table = p_phys_pbl;
	p_chain->pbl.p_virt_table = p_virt_pbl;

	/* Fill the PBL with physical addresses */
	for (i = 0; i < page_cnt; i++) {
		*p_pbl_dma = p_phys_addr;
		p_phys_addr += QED_CHAIN_PAGE_SIZE;
		p_pbl_dma++;
	}
}

/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u16 prod_idx,
				      void *p_prod_elem)
{
	p_chain->prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}

/**
 * @brief qed_chain_sge_get_elem -
 *
 * get a pointer to an element represented by absolute idx
 *
 * @param p_chain
 * @param idx
 * @assumption p_chain->size is a power of 2
 *
 * @return void*, a pointer to the requested element
 */
static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
					   u16 idx)
{
	void *ret = NULL;

	if (idx >= p_chain->size)
		return NULL;

	ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;

	return ret;
}

/**
 * @brief qed_chain_sge_inc_cons_prod
 *
 * For sge chains, the producer isn't increased serially; the ring is
 * expected to be full at all times. Once elements are consumed, they
 * are immediately produced.
 *
 * @param p_chain
 * @param cnt
 */
static inline void
qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
			    u16 cnt)
{
	p_chain->prod_idx += cnt;
	p_chain->cons_idx += cnt;
}

#endif
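
/* Illustrative usage sketch (not part of the original header, appended here
 * purely as an example): allocating the chain pages and the PBL side table
 * from coherent DMA memory and initializing a two-page PBL chain of 8-byte
 * elements. Error handling is minimal and the function name is hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static inline int qed_chain_example_alloc_pbl(struct device *dev,
					      struct qed_chain *p_chain)
{
	u16 page_cnt = 2;
	dma_addr_t p_phys, p_pbl_phys;
	dma_addr_t *p_pbl_virt;
	void *p_virt;

	/* One contiguous allocation for the chain pages themselves */
	p_virt = dma_alloc_coherent(dev, page_cnt * QED_CHAIN_PAGE_SIZE,
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		return -ENOMEM;

	/* Side table with one 8-byte entry per chain page; this matches
	 * sizeof(dma_addr_t) on 64-bit builds.
	 */
	p_pbl_virt = dma_alloc_coherent(dev,
					page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
					&p_pbl_phys, GFP_KERNEL);
	if (!p_pbl_virt) {
		dma_free_coherent(dev, page_cnt * QED_CHAIN_PAGE_SIZE,
				  p_virt, p_phys);
		return -ENOMEM;
	}

	qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
			   8 /* elem_size */, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			   p_pbl_phys, p_pbl_virt);

	return 0;
}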