1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/errno.h>
7 #include <linux/io.h>
8 #include <linux/slab.h>
9 #include <linux/etherdevice.h>
10 #include "ionic.h"
11 #include "ionic_dev.h"
12 #include "ionic_lif.h"
13 
/* Periodic watchdog timer callback: re-arms itself, runs the FW
 * heartbeat check, and kicks off deferred link/rx-mode work as needed.
 * Runs in timer (softirq/atomic) context.
 */
static void ionic_watchdog_cb(struct timer_list *t)
{
	struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
	struct ionic_lif *lif = ionic->lif;
	struct ionic_deferred_work *work;
	int hb;

	/* re-arm first so the watchdog keeps running even if we bail out */
	mod_timer(&ionic->watchdog_timer,
		  round_jiffies(jiffies + ionic->watchdog_period));

	/* the LIF may not exist yet (early probe) or anymore (teardown) */
	if (!lif)
		return;

	hb = ionic_heartbeat_check(ionic);
	dev_dbg(ionic->dev, "%s: hb %d running %d UP %d\n",
		__func__, hb, netif_running(lif->netdev),
		test_bit(IONIC_LIF_F_UP, lif->state));

	/* hb < 0 means FW is down/stalled; skip checks during FW reset */
	if (hb >= 0 &&
	    !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);

	if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state) &&
	    !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		/* GFP_ATOMIC: we're in timer context, can't sleep */
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "rxmode change dropped\n");
			return;
		}

		work->type = IONIC_DW_TYPE_RX_MODE;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(lif, work);
	}
}
49 
/* Schedule a napi poll from process context: disabling bottom halves
 * around napi_schedule() lets the softirq run on bh re-enable.
 */
static void ionic_napi_schedule_do_softirq(struct napi_struct *napi)
{
	local_bh_disable();
	napi_schedule(napi);
	local_bh_enable();
}
56 
57 static int ionic_get_preferred_cpu(struct ionic *ionic,
58 				   struct ionic_intr_info *intr)
59 {
60 	int cpu;
61 
62 	cpu = cpumask_first_and(*intr->affinity_mask, cpu_online_mask);
63 	if (cpu >= nr_cpu_ids)
64 		cpu = cpumask_local_spread(0, dev_to_node(ionic->dev));
65 
66 	return cpu;
67 }
68 
/* Delayed-work handler that nudges every active napi context so any
 * missed doorbell gets serviced, then re-queues itself.
 */
static void ionic_doorbell_check_dwork(struct work_struct *work)
{
	struct ionic *ionic = container_of(work, struct ionic,
					   doorbell_check_dwork.work);
	struct ionic_lif *lif = ionic->lif;

	/* hold queue_lock so queues can't be torn down under us */
	mutex_lock(&lif->queue_lock);

	/* nothing to poke while FW is stopping or in reset */
	if (test_bit(IONIC_LIF_F_FW_STOPPING, lif->state) ||
	    test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		mutex_unlock(&lif->queue_lock);
		return;
	}

	/* the adminq is always serviced */
	ionic_napi_schedule_do_softirq(&lif->adminqcq->napi);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		int i;

		/* kick each Tx/Rx queue pair */
		for (i = 0; i < lif->nxqs; i++) {
			ionic_napi_schedule_do_softirq(&lif->txqcqs[i]->napi);
			ionic_napi_schedule_do_softirq(&lif->rxqcqs[i]->napi);
		}

		/* hwstamp queues only have their own napi when they
		 * carry their own interrupt
		 */
		if (lif->hwstamp_txq &&
		    lif->hwstamp_txq->flags & IONIC_QCQ_F_INTR)
			ionic_napi_schedule_do_softirq(&lif->hwstamp_txq->napi);
		if (lif->hwstamp_rxq &&
		    lif->hwstamp_rxq->flags & IONIC_QCQ_F_INTR)
			ionic_napi_schedule_do_softirq(&lif->hwstamp_rxq->napi);
	}
	mutex_unlock(&lif->queue_lock);

	/* re-arm for the next deadline check */
	ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
}
104 
105 static int ionic_watchdog_init(struct ionic *ionic)
106 {
107 	struct ionic_dev *idev = &ionic->idev;
108 
109 	timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
110 	ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
111 
112 	/* set times to ensure the first check will proceed */
113 	atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
114 	idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
115 	/* init as ready, so no transition if the first check succeeds */
116 	idev->last_fw_hb = 0;
117 	idev->fw_hb_ready = true;
118 	idev->fw_status_ready = true;
119 	idev->fw_generation = IONIC_FW_STS_F_GENERATION &
120 			      ioread8(&idev->dev_info_regs->fw_status);
121 
122 	ionic->wq = alloc_workqueue("%s-wq", WQ_UNBOUND, 0,
123 				    dev_name(ionic->dev));
124 	if (!ionic->wq) {
125 		dev_err(ionic->dev, "alloc_workqueue failed");
126 		return -ENOMEM;
127 	}
128 	INIT_DELAYED_WORK(&ionic->doorbell_check_dwork,
129 			  ionic_doorbell_check_dwork);
130 
131 	return 0;
132 }
133 
134 void ionic_queue_doorbell_check(struct ionic *ionic, int delay)
135 {
136 	int cpu;
137 
138 	cpu = ionic_get_preferred_cpu(ionic, &ionic->lif->adminqcq->intr);
139 	queue_delayed_work_on(cpu, ionic->wq, &ionic->doorbell_check_dwork,
140 			      delay);
141 }
142 
143 void ionic_init_devinfo(struct ionic *ionic)
144 {
145 	struct ionic_dev *idev = &ionic->idev;
146 
147 	idev->dev_info.asic_type = ioread8(&idev->dev_info_regs->asic_type);
148 	idev->dev_info.asic_rev = ioread8(&idev->dev_info_regs->asic_rev);
149 
150 	memcpy_fromio(idev->dev_info.fw_version,
151 		      idev->dev_info_regs->fw_version,
152 		      IONIC_DEVINFO_FWVERS_BUFLEN);
153 
154 	memcpy_fromio(idev->dev_info.serial_num,
155 		      idev->dev_info_regs->serial_num,
156 		      IONIC_DEVINFO_SERIAL_BUFLEN);
157 
158 	idev->dev_info.fw_version[IONIC_DEVINFO_FWVERS_BUFLEN] = 0;
159 	idev->dev_info.serial_num[IONIC_DEVINFO_SERIAL_BUFLEN] = 0;
160 
161 	dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
162 }
163 
164 int ionic_dev_setup(struct ionic *ionic)
165 {
166 	struct ionic_dev_bar *bar = ionic->bars;
167 	unsigned int num_bars = ionic->num_bars;
168 	struct ionic_dev *idev = &ionic->idev;
169 	struct device *dev = ionic->dev;
170 	int size;
171 	u32 sig;
172 	int err;
173 
174 	/* BAR0: dev_cmd and interrupts */
175 	if (num_bars < 1) {
176 		dev_err(dev, "No bars found, aborting\n");
177 		return -EFAULT;
178 	}
179 
180 	if (bar->len < IONIC_BAR0_SIZE) {
181 		dev_err(dev, "Resource bar size %lu too small, aborting\n",
182 			bar->len);
183 		return -EFAULT;
184 	}
185 
186 	idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET;
187 	idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET;
188 	idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET;
189 	idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET;
190 
191 	idev->hwstamp_regs = &idev->dev_info_regs->hwstamp;
192 
193 	sig = ioread32(&idev->dev_info_regs->signature);
194 	if (sig != IONIC_DEV_INFO_SIGNATURE) {
195 		dev_err(dev, "Incompatible firmware signature %x", sig);
196 		return -EFAULT;
197 	}
198 
199 	ionic_init_devinfo(ionic);
200 
201 	/* BAR1: doorbells */
202 	bar++;
203 	if (num_bars < 2) {
204 		dev_err(dev, "Doorbell bar missing, aborting\n");
205 		return -EFAULT;
206 	}
207 
208 	err = ionic_watchdog_init(ionic);
209 	if (err)
210 		return err;
211 
212 	idev->db_pages = bar->vaddr;
213 	idev->phy_db_pages = bar->bus_addr;
214 
215 	/* BAR2: optional controller memory mapping */
216 	bar++;
217 	mutex_init(&idev->cmb_inuse_lock);
218 	if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
219 		idev->cmb_inuse = NULL;
220 		return 0;
221 	}
222 
223 	idev->phy_cmb_pages = bar->bus_addr;
224 	idev->cmb_npages = bar->len / PAGE_SIZE;
225 	size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
226 	idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
227 	if (!idev->cmb_inuse)
228 		dev_warn(dev, "No memory for CMB, disabling\n");
229 
230 	return 0;
231 }
232 
/* Undo ionic_dev_setup(): release the CMB bitmap, flush and destroy the
 * private workqueue, and tear down the CMB lock.
 */
void ionic_dev_teardown(struct ionic *ionic)
{
	struct ionic_dev *idev = &ionic->idev;

	kfree(idev->cmb_inuse);
	idev->cmb_inuse = NULL;	/* defend against use-after-free */
	idev->phy_cmb_pages = 0;
	idev->cmb_npages = 0;

	destroy_workqueue(ionic->wq);
	mutex_destroy(&idev->cmb_inuse_lock);
}
245 
246 /* Devcmd Interface */
247 static bool __ionic_is_fw_running(struct ionic_dev *idev, u8 *status_ptr)
248 {
249 	u8 fw_status;
250 
251 	if (!idev->dev_info_regs) {
252 		if (status_ptr)
253 			*status_ptr = 0xff;
254 		return false;
255 	}
256 
257 	fw_status = ioread8(&idev->dev_info_regs->fw_status);
258 	if (status_ptr)
259 		*status_ptr = fw_status;
260 
261 	/* firmware is useful only if the running bit is set and
262 	 * fw_status != 0xff (bad PCI read)
263 	 */
264 	return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
265 }
266 
/* Return true if the FW status register shows firmware up and running. */
bool ionic_is_fw_running(struct ionic_dev *idev)
{
	return __ionic_is_fw_running(idev, NULL);
}
271 
272 int ionic_heartbeat_check(struct ionic *ionic)
273 {
274 	unsigned long check_time, last_check_time;
275 	struct ionic_dev *idev = &ionic->idev;
276 	struct ionic_lif *lif = ionic->lif;
277 	bool fw_status_ready = true;
278 	bool fw_hb_ready;
279 	u8 fw_generation;
280 	u8 fw_status;
281 	u32 fw_hb;
282 
283 	/* wait a least one second before testing again */
284 	check_time = jiffies;
285 	last_check_time = atomic_long_read(&idev->last_check_time);
286 do_check_time:
287 	if (time_before(check_time, last_check_time + HZ))
288 		return 0;
289 	if (!atomic_long_try_cmpxchg_relaxed(&idev->last_check_time,
290 					     &last_check_time, check_time)) {
291 		/* if called concurrently, only the first should proceed. */
292 		dev_dbg(ionic->dev, "%s: do_check_time again\n", __func__);
293 		goto do_check_time;
294 	}
295 
296 	/* If fw_status is not ready don't bother with the generation */
297 	if (!__ionic_is_fw_running(idev, &fw_status)) {
298 		fw_status_ready = false;
299 	} else {
300 		fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
301 		if (idev->fw_generation != fw_generation) {
302 			dev_info(ionic->dev, "FW generation 0x%02x -> 0x%02x\n",
303 				 idev->fw_generation, fw_generation);
304 
305 			idev->fw_generation = fw_generation;
306 
307 			/* If the generation changed, the fw status is not
308 			 * ready so we need to trigger a fw-down cycle.  After
309 			 * the down, the next watchdog will see the fw is up
310 			 * and the generation value stable, so will trigger
311 			 * the fw-up activity.
312 			 *
313 			 * If we had already moved to FW_RESET from a RESET event,
314 			 * it is possible that we never saw the fw_status go to 0,
315 			 * so we fake the current idev->fw_status_ready here to
316 			 * force the transition and get FW up again.
317 			 */
318 			if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
319 				idev->fw_status_ready = false;	/* go to running */
320 			else
321 				fw_status_ready = false;	/* go to down */
322 		}
323 	}
324 
325 	dev_dbg(ionic->dev, "fw_status 0x%02x ready %d idev->ready %d last_hb 0x%x state 0x%02lx\n",
326 		fw_status, fw_status_ready, idev->fw_status_ready,
327 		idev->last_fw_hb, lif->state[0]);
328 
329 	/* is this a transition? */
330 	if (fw_status_ready != idev->fw_status_ready &&
331 	    !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
332 		bool trigger = false;
333 
334 		idev->fw_status_ready = fw_status_ready;
335 
336 		if (!fw_status_ready &&
337 		    !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
338 		    !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
339 			dev_info(ionic->dev, "FW stopped 0x%02x\n", fw_status);
340 			trigger = true;
341 
342 		} else if (fw_status_ready &&
343 			   test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
344 			dev_info(ionic->dev, "FW running 0x%02x\n", fw_status);
345 			trigger = true;
346 		}
347 
348 		if (trigger) {
349 			struct ionic_deferred_work *work;
350 
351 			work = kzalloc(sizeof(*work), GFP_ATOMIC);
352 			if (work) {
353 				work->type = IONIC_DW_TYPE_LIF_RESET;
354 				work->fw_status = fw_status_ready;
355 				ionic_lif_deferred_enqueue(lif, work);
356 			}
357 		}
358 	}
359 
360 	if (!idev->fw_status_ready)
361 		return -ENXIO;
362 
363 	/* Because of some variability in the actual FW heartbeat, we
364 	 * wait longer than the DEVCMD_TIMEOUT before checking again.
365 	 */
366 	last_check_time = idev->last_hb_time;
367 	if (time_before(check_time, last_check_time + DEVCMD_TIMEOUT * 2 * HZ))
368 		return 0;
369 
370 	fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
371 	fw_hb_ready = fw_hb != idev->last_fw_hb;
372 
373 	/* early FW version had no heartbeat, so fake it */
374 	if (!fw_hb_ready && !fw_hb)
375 		fw_hb_ready = true;
376 
377 	dev_dbg(ionic->dev, "%s: fw_hb %u last_fw_hb %u ready %u\n",
378 		__func__, fw_hb, idev->last_fw_hb, fw_hb_ready);
379 
380 	idev->last_fw_hb = fw_hb;
381 
382 	/* log a transition */
383 	if (fw_hb_ready != idev->fw_hb_ready) {
384 		idev->fw_hb_ready = fw_hb_ready;
385 		if (!fw_hb_ready)
386 			dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb);
387 		else
388 			dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb);
389 	}
390 
391 	if (!fw_hb_ready)
392 		return -ENXIO;
393 
394 	idev->last_hb_time = check_time;
395 
396 	return 0;
397 }
398 
/* Read the completion status of the last device command; returns
 * PCI_ERROR_RESPONSE if the dev_cmd register space isn't mapped.
 */
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
	if (!idev->dev_cmd_regs)
		return (u8)PCI_ERROR_RESPONSE;
	return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}
405 
/* Return true when the device has marked the current command done. */
bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
	if (!idev->dev_cmd_regs)
		return false;
	return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}
412 
/* Copy the device command completion out of the register space into
 * @comp; a no-op if the register space isn't mapped.
 */
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
	if (!idev->dev_cmd_regs)
		return;
	memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}
419 
/* Post a device command: record the opcode, copy the command into the
 * dev_cmd register space, clear the done flag, then ring the doorbell.
 * The write order matters: done must be cleared before the doorbell.
 */
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
	/* opcode is saved even if regs are unmapped, for error reporting */
	idev->opcode = cmd->cmd.opcode;

	if (!idev->dev_cmd_regs)
		return;

	memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
	iowrite32(0, &idev->dev_cmd_regs->done);
	iowrite32(1, &idev->dev_cmd_regs->doorbell);
}
431 
432 /* Device commands */
433 void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver)
434 {
435 	union ionic_dev_cmd cmd = {
436 		.identify.opcode = IONIC_CMD_IDENTIFY,
437 		.identify.ver = ver,
438 	};
439 
440 	ionic_dev_cmd_go(idev, &cmd);
441 }
442 
443 void ionic_dev_cmd_init(struct ionic_dev *idev)
444 {
445 	union ionic_dev_cmd cmd = {
446 		.init.opcode = IONIC_CMD_INIT,
447 		.init.type = 0,
448 	};
449 
450 	ionic_dev_cmd_go(idev, &cmd);
451 }
452 
453 void ionic_dev_cmd_reset(struct ionic_dev *idev)
454 {
455 	union ionic_dev_cmd cmd = {
456 		.reset.opcode = IONIC_CMD_RESET,
457 	};
458 
459 	ionic_dev_cmd_go(idev, &cmd);
460 }
461 
462 /* Port commands */
463 void ionic_dev_cmd_port_identify(struct ionic_dev *idev)
464 {
465 	union ionic_dev_cmd cmd = {
466 		.port_init.opcode = IONIC_CMD_PORT_IDENTIFY,
467 		.port_init.index = 0,
468 	};
469 
470 	ionic_dev_cmd_go(idev, &cmd);
471 }
472 
473 void ionic_dev_cmd_port_init(struct ionic_dev *idev)
474 {
475 	union ionic_dev_cmd cmd = {
476 		.port_init.opcode = IONIC_CMD_PORT_INIT,
477 		.port_init.index = 0,
478 		.port_init.info_pa = cpu_to_le64(idev->port_info_pa),
479 	};
480 
481 	ionic_dev_cmd_go(idev, &cmd);
482 }
483 
484 void ionic_dev_cmd_port_reset(struct ionic_dev *idev)
485 {
486 	union ionic_dev_cmd cmd = {
487 		.port_reset.opcode = IONIC_CMD_PORT_RESET,
488 		.port_reset.index = 0,
489 	};
490 
491 	ionic_dev_cmd_go(idev, &cmd);
492 }
493 
494 void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state)
495 {
496 	union ionic_dev_cmd cmd = {
497 		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
498 		.port_setattr.index = 0,
499 		.port_setattr.attr = IONIC_PORT_ATTR_STATE,
500 		.port_setattr.state = state,
501 	};
502 
503 	ionic_dev_cmd_go(idev, &cmd);
504 }
505 
506 void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed)
507 {
508 	union ionic_dev_cmd cmd = {
509 		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
510 		.port_setattr.index = 0,
511 		.port_setattr.attr = IONIC_PORT_ATTR_SPEED,
512 		.port_setattr.speed = cpu_to_le32(speed),
513 	};
514 
515 	ionic_dev_cmd_go(idev, &cmd);
516 }
517 
518 void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable)
519 {
520 	union ionic_dev_cmd cmd = {
521 		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
522 		.port_setattr.index = 0,
523 		.port_setattr.attr = IONIC_PORT_ATTR_AUTONEG,
524 		.port_setattr.an_enable = an_enable,
525 	};
526 
527 	ionic_dev_cmd_go(idev, &cmd);
528 }
529 
530 void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type)
531 {
532 	union ionic_dev_cmd cmd = {
533 		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
534 		.port_setattr.index = 0,
535 		.port_setattr.attr = IONIC_PORT_ATTR_FEC,
536 		.port_setattr.fec_type = fec_type,
537 	};
538 
539 	ionic_dev_cmd_go(idev, &cmd);
540 }
541 
542 void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
543 {
544 	union ionic_dev_cmd cmd = {
545 		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
546 		.port_setattr.index = 0,
547 		.port_setattr.attr = IONIC_PORT_ATTR_PAUSE,
548 		.port_setattr.pause_type = pause_type,
549 	};
550 
551 	ionic_dev_cmd_go(idev, &cmd);
552 }
553 
554 /* VF commands */
555 int ionic_set_vf_config(struct ionic *ionic, int vf,
556 			struct ionic_vf_setattr_cmd *vfc)
557 {
558 	union ionic_dev_cmd cmd = {
559 		.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
560 		.vf_setattr.attr = vfc->attr,
561 		.vf_setattr.vf_index = cpu_to_le16(vf),
562 	};
563 	int err;
564 
565 	memcpy(cmd.vf_setattr.pad, vfc->pad, sizeof(vfc->pad));
566 
567 	mutex_lock(&ionic->dev_cmd_lock);
568 	ionic_dev_cmd_go(&ionic->idev, &cmd);
569 	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
570 	mutex_unlock(&ionic->dev_cmd_lock);
571 
572 	return err;
573 }
574 
575 void ionic_vf_start(struct ionic *ionic)
576 {
577 	union ionic_dev_cmd cmd = {
578 		.vf_ctrl.opcode = IONIC_CMD_VF_CTRL,
579 		.vf_ctrl.ctrl_opcode = IONIC_VF_CTRL_START_ALL,
580 	};
581 
582 	if (!(ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_VF_CTRL)))
583 		return;
584 
585 	ionic_dev_cmd_go(&ionic->idev, &cmd);
586 	ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
587 }
588 
589 /* LIF commands */
590 void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
591 				  u16 lif_type, u8 qtype, u8 qver)
592 {
593 	union ionic_dev_cmd cmd = {
594 		.q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
595 		.q_identify.lif_type = cpu_to_le16(lif_type),
596 		.q_identify.type = qtype,
597 		.q_identify.ver = qver,
598 	};
599 
600 	ionic_dev_cmd_go(idev, &cmd);
601 }
602 
603 void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
604 {
605 	union ionic_dev_cmd cmd = {
606 		.lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY,
607 		.lif_identify.type = type,
608 		.lif_identify.ver = ver,
609 	};
610 
611 	ionic_dev_cmd_go(idev, &cmd);
612 }
613 
614 void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
615 			    dma_addr_t info_pa)
616 {
617 	union ionic_dev_cmd cmd = {
618 		.lif_init.opcode = IONIC_CMD_LIF_INIT,
619 		.lif_init.index = cpu_to_le16(lif_index),
620 		.lif_init.info_pa = cpu_to_le64(info_pa),
621 	};
622 
623 	ionic_dev_cmd_go(idev, &cmd);
624 }
625 
626 void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index)
627 {
628 	union ionic_dev_cmd cmd = {
629 		.lif_init.opcode = IONIC_CMD_LIF_RESET,
630 		.lif_init.index = cpu_to_le16(lif_index),
631 	};
632 
633 	ionic_dev_cmd_go(idev, &cmd);
634 }
635 
636 void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
637 			       u16 lif_index, u16 intr_index)
638 {
639 	struct ionic_queue *q = &qcq->q;
640 	struct ionic_cq *cq = &qcq->cq;
641 
642 	union ionic_dev_cmd cmd = {
643 		.q_init.opcode = IONIC_CMD_Q_INIT,
644 		.q_init.lif_index = cpu_to_le16(lif_index),
645 		.q_init.type = q->type,
646 		.q_init.ver = qcq->q.lif->qtype_info[q->type].version,
647 		.q_init.index = cpu_to_le32(q->index),
648 		.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
649 					    IONIC_QINIT_F_ENA),
650 		.q_init.pid = cpu_to_le16(q->pid),
651 		.q_init.intr_index = cpu_to_le16(intr_index),
652 		.q_init.ring_size = ilog2(q->num_descs),
653 		.q_init.ring_base = cpu_to_le64(q->base_pa),
654 		.q_init.cq_ring_base = cpu_to_le64(cq->base_pa),
655 	};
656 
657 	ionic_dev_cmd_go(idev, &cmd);
658 }
659 
660 int ionic_db_page_num(struct ionic_lif *lif, int pid)
661 {
662 	return (lif->hw_index * lif->dbid_count) + pid;
663 }
664 
665 int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
666 {
667 	struct ionic_dev *idev = &lif->ionic->idev;
668 	int ret;
669 
670 	mutex_lock(&idev->cmb_inuse_lock);
671 	ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
672 	mutex_unlock(&idev->cmb_inuse_lock);
673 
674 	if (ret < 0)
675 		return ret;
676 
677 	*pgid = ret;
678 	*pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
679 
680 	return 0;
681 }
682 
/* Return a 2^order-page CMB region, previously obtained from
 * ionic_get_cmb(), to the free pool.
 */
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&idev->cmb_inuse_lock);
	bitmap_release_region(idev->cmb_inuse, pgid, order);
	mutex_unlock(&idev->cmb_inuse_lock);
}
691 
692 int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
693 		  struct ionic_intr_info *intr,
694 		  unsigned int num_descs, size_t desc_size)
695 {
696 	unsigned int ring_size;
697 
698 	if (desc_size == 0 || !is_power_of_2(num_descs))
699 		return -EINVAL;
700 
701 	ring_size = ilog2(num_descs);
702 	if (ring_size < 2 || ring_size > 16)
703 		return -EINVAL;
704 
705 	cq->lif = lif;
706 	cq->bound_intr = intr;
707 	cq->num_descs = num_descs;
708 	cq->desc_size = desc_size;
709 	cq->tail_idx = 0;
710 	cq->done_color = 1;
711 	cq->idev = &lif->ionic->idev;
712 
713 	return 0;
714 }
715 
716 unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
717 			      ionic_cq_cb cb, ionic_cq_done_cb done_cb,
718 			      void *done_arg)
719 {
720 	unsigned int work_done = 0;
721 
722 	if (work_to_do == 0)
723 		return 0;
724 
725 	while (cb(cq)) {
726 		if (cq->tail_idx == cq->num_descs - 1)
727 			cq->done_color = !cq->done_color;
728 
729 		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
730 
731 		if (++work_done >= work_to_do)
732 			break;
733 	}
734 
735 	if (work_done && done_cb)
736 		done_cb(done_arg);
737 
738 	return work_done;
739 }
740 
741 int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
742 		 struct ionic_queue *q, unsigned int index, const char *name,
743 		 unsigned int num_descs, size_t desc_size,
744 		 size_t sg_desc_size, unsigned int pid)
745 {
746 	unsigned int ring_size;
747 
748 	if (desc_size == 0 || !is_power_of_2(num_descs))
749 		return -EINVAL;
750 
751 	ring_size = ilog2(num_descs);
752 	if (ring_size < 2 || ring_size > 16)
753 		return -EINVAL;
754 
755 	q->lif = lif;
756 	q->index = index;
757 	q->num_descs = num_descs;
758 	q->desc_size = desc_size;
759 	q->sg_desc_size = sg_desc_size;
760 	q->tail_idx = 0;
761 	q->head_idx = 0;
762 	q->pid = pid;
763 
764 	snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
765 
766 	return 0;
767 }
768 
769 void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
770 {
771 	struct ionic_lif *lif = q->lif;
772 	struct device *dev = q->dev;
773 
774 	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
775 
776 	dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
777 		q->lif->index, q->name, q->hw_type, q->hw_index,
778 		q->head_idx, ring_doorbell);
779 
780 	if (ring_doorbell) {
781 		ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
782 				 q->dbval | q->head_idx);
783 
784 		q->dbell_jiffies = jiffies;
785 	}
786 }
787 
788 bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
789 {
790 	unsigned int mask, tail, head;
791 
792 	mask = q->num_descs - 1;
793 	tail = q->tail_idx;
794 	head = q->head_idx;
795 
796 	return ((pos - tail) & mask) < ((head - tail) & mask);
797 }
798