| /linux-6.15/drivers/net/ethernet/huawei/hinic/ |
| H A D | hinic_hw_eqs.c | 49 ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \ 58 #define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx) argument 60 #define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx) argument 270 if (eq->cons_idx == eq->q_len) { in aeq_irq_handler() 272 eq->wrapped = !eq->wrapped; in aeq_irq_handler() 334 if (eq->cons_idx == eq->q_len) { in ceq_irq_handler() 336 eq->wrapped = !eq->wrapped; in ceq_irq_handler() 797 snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id, in init_eq() 801 snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id, in init_eq() 826 free_irq(eq->msix_entry.vector, eq); in remove_eq() [all …]
|
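The hinic handlers above flip a `wrapped` flag whenever the consumer index reaches the queue length, so it can later be compared against the per-entry wrapped/owner bit that hardware writes. A minimal standalone model of that bookkeeping; the names are hypothetical, not the driver's:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the driver's per-EQ state. */
struct model_eq {
	uint32_t cons_idx;  /* next element to consume         */
	uint32_t q_len;     /* number of elements in the queue */
	bool wrapped;       /* software's current pass parity  */
};

/* Mirrors the wrap step used by aeq_irq_handler()/ceq_irq_handler()
 * above: reset the index and toggle the parity flag.
 */
void model_eq_advance(struct model_eq *eq)
{
	eq->cons_idx++;
	if (eq->cons_idx == eq->q_len) {
		eq->cons_idx = 0;
		eq->wrapped = !eq->wrapped;
	}
}
```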
| /linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/ |
| H A D | eq.c | 608 err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); in setup_async_eq() 621 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); in cleanup_async_eq() 753 if (!eq) in mlx5_eq_create_generic() 763 return eq; in mlx5_eq_create_generic() 950 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); in destroy_comp_eq() 955 kfree(eq); in destroy_comp_eq() 999 if (!eq) { in create_comp_eq() 1019 err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); in create_comp_eq() 1034 mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); in create_comp_eq() 1056 if (eq) { in mlx5_comp_eqn_get() [all …]
|
| H A D | cq.c | 106 struct mlx5_eq_comp *eq; in mlx5_create_cq() local 109 eq = mlx5_eqn2comp_eq(dev, eqn); in mlx5_create_cq() 110 if (IS_ERR(eq)) in mlx5_create_cq() 111 return PTR_ERR(eq); in mlx5_create_cq() 122 cq->eq = eq; in mlx5_create_cq() 129 cq->tasklet_ctx.priv = &eq->tasklet_ctx; in mlx5_create_cq() 133 err = mlx5_eq_add_cq(&eq->core, cq); in mlx5_create_cq() 149 cq->irqn = eq->core.irqn; in mlx5_create_cq() 154 mlx5_eq_del_cq(&eq->core, cq); in mlx5_create_cq() 182 mlx5_eq_del_cq(&cq->eq->core, cq); in mlx5_core_destroy_cq()
|
| /linux-6.15/drivers/infiniband/hw/erdma/ |
| H A D | erdma_eq.c | 16 *eq->dbrec = db_data; in notify_eq() 24 u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT); in get_next_valid_eqe() 27 return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL; in get_next_valid_eqe() 89 if (!eq->qbuf) in erdma_eq_common_init() 93 if (!eq->dbrec) in erdma_eq_common_init() 99 eq->ci = 0; in erdma_eq_common_init() 100 eq->depth = depth; in erdma_eq_common_init() 113 dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma); in erdma_eq_destroy() 156 ceq_cb->eq.ci++; in erdma_ceq_completion_handler() 245 struct erdma_eq *eq = &dev->ceqs[ceqn].eq; in erdma_ceq_init_one() local [all …]
|
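The erdma helper above decides whether an EQE is new by XOR-ing the entry's owner bit with `!!(eq->ci & eq->depth)`; since `depth` is a power of two and `ci` free-runs, that term flips on every wrap. A standalone restatement of the test, with hypothetical names:

```c
#include <stdbool.h>
#include <stdint.h>

/* Restates the test in get_next_valid_eqe() above.  depth is a power
 * of two and ci is a free-running count, so (ci & depth) flips each
 * time the queue wraps; an entry is new only while the hardware-written
 * owner bit differs from that wrap parity.
 */
bool model_eqe_is_valid(uint64_t owner_bit, uint32_t ci, uint32_t depth)
{
	bool owner = !!owner_bit;     /* owner bit pulled out of the EQE */
	bool pass  = !!(ci & depth);  /* software's wrap parity          */

	/* Same expression as "owner ^ !!(eq->ci & eq->depth)" above. */
	return owner ^ pass;
}
```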
| /linux-6.15/sound/pci/au88x0/ |
| H A D | au88x0_eq.c | 495 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_GetLeftGain() local 506 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_SetLeftGain() local 520 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_GetRightGain() local 531 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_SetRightGain() local 547 eqlzr_t *eq = &(vortex->eq); 568 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_SetAllBandsFromActiveCoeffSet() local 579 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_SetAllBands() local 597 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_SetA3dBypassGain() local 613 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_ProgramA3dBypassGain() local 633 eqlzr_t *eq = &(vortex->eq); in vortex_Eqlzr_SetBypass() local [all …]
|
| /linux-6.15/drivers/infiniband/hw/mthca/ |
| H A D | mthca_eq.c | 184 mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1), in tavor_set_eq_ci() 237 eqe = get_eqe(eq, eq->cons_index); in next_eqe_sw() 363 ++eq->cons_index; in mthca_eq_int() 379 set_eq_ci(dev, eq, eq->cons_index); in mthca_eq_int() 424 tavor_set_eq_ci(dev, eq, eq->cons_index); in mthca_tavor_msi_x_interrupt() 458 arbel_set_eq_ci(dev, eq, eq->cons_index); in mthca_arbel_msi_x_interrupt() 478 eq->dev = dev; in mthca_create_eq() 482 eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list), in mthca_create_eq() 523 &eq->mr); in mthca_create_eq() 554 eq->eqn_mask = swab32(1 << eq->eqn); in mthca_create_eq() [all …]
|
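`tavor_set_eq_ci()` above masks the consumer index with `eq->nent - 1` before writing the doorbell. A quick standalone check that, for a power-of-two queue size, the mask is equivalent to a modulo:

```c
#include <assert.h>
#include <stdint.h>

/* For a power-of-two queue size nent, "ci & (nent - 1)" equals
 * "ci % nent", so the free-running consumer index used by
 * tavor_set_eq_ci() above folds into a queue slot without a divide.
 */
int main(void)
{
	const uint32_t nent = 256;	/* demo size; EQ sizes are powers of two */

	for (uint32_t ci = 0; ci < 5 * nent; ci++)
		assert((ci & (nent - 1)) == (ci % nent));

	return 0;
}
```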
| /linux-6.15/drivers/net/ethernet/mellanox/mlx4/ |
| H A D | eq.c | 241 struct mlx4_eq *eq = &priv->eq_table.eq[vec]; in mlx4_set_eq_affinity_hint() local 543 eq->eqn, eq->cons_index, ret); in mlx4_eq_int() 573 eq->eqn, eq->cons_index, ret); in mlx4_eq_int() 698 eq->eqn, eq->cons_index, ret); in mlx4_eq_int() 783 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int() 808 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int() 820 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int() 1023 eq->doorbell = mlx4_get_eq_uar(dev, eq); in mlx4_create_eq() 1226 struct mlx4_eq *eq = &priv->eq_table.eq[i]; in mlx4_init_eq_table() local 1473 eq = &priv->eq_table.eq[requested_vector]; in mlx4_assign_eq() [all …]
|
| /linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| H A D | eq.h | 51 static inline u32 eq_get_size(struct mlx5_eq *eq) in eq_get_size() argument 53 return eq->fbc.sz_m1 + 1; in eq_get_size() 58 return mlx5_frag_buf_get_wqe(&eq->fbc, entry); in get_eqe() 61 static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) in next_eqe_sw() argument 63 struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & eq->fbc.sz_m1); in next_eqe_sw() 65 return (eqe->owner ^ (eq->cons_index >> eq->fbc.log_sz)) & 1 ? NULL : eqe; in next_eqe_sw() 68 static inline void eq_update_ci(struct mlx5_eq *eq, int arm) in eq_update_ci() argument 70 __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); in eq_update_ci() 71 u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); in eq_update_ci() 83 int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); [all …]
|
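The mlx5 helpers above carry the software side of EQ polling: `next_eqe_sw()` compares each entry's owner bit against the pass count `cons_index >> log_sz`, and `eq_update_ci()` writes the consumer index to one of two doorbell offsets depending on whether the EQ should be re-armed. A standalone illustration of the owner-bit arithmetic, with demo values only:

```c
#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the owner test in next_eqe_sw() above.
 * The EQ holds 2^log_sz entries and cons_index free-runs, so
 * (cons_index >> log_sz) counts completed passes over the queue.
 * next_eqe_sw() treats an EQE as new only when its owner bit matches
 * the low bit of that pass count; otherwise it returns NULL.
 */
int eqe_is_new(uint8_t owner, uint32_t cons_index, uint8_t log_sz)
{
	return !((owner ^ (cons_index >> log_sz)) & 1);
}

int main(void)
{
	const uint8_t log_sz = 3;	/* 8-entry queue, demo value */

	/* If hardware writes owner = 0 on the first pass ... */
	printf("%d %d\n", eqe_is_new(0, 5, log_sz), eqe_is_new(1, 5, log_sz)); /* 1 0 */
	/* ... then after one wrap (cons_index >= 8) the expected parity flips. */
	printf("%d %d\n", eqe_is_new(1, 9, log_sz), eqe_is_new(0, 9, log_sz)); /* 1 0 */
	return 0;
}
```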
| /linux-6.15/drivers/scsi/elx/efct/ |
| H A D | efct_hw_queues.c | 35 if (!eq) { in efct_hw_init_queues() 130 struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL); in efct_hw_new_eq() local 132 if (!eq) in efct_hw_new_eq() 139 eq->queue = &hw->eq[eq->instance]; in efct_hw_new_eq() 150 hw->hw_eq[eq->instance] = eq; in efct_hw_new_eq() 154 eq->queue->id, eq->entry_count); in efct_hw_new_eq() 155 return eq; in efct_hw_new_eq() 167 cq->eq = eq; in efct_hw_new_cq() 178 eq->instance, eq->entry_count); in efct_hw_new_cq() 393 if (!eq) in efct_hw_del_eq() [all …]
|
| /linux-6.15/arch/powerpc/kernel/ |
| H A D | cpu_setup_6xx.S | 217 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq 218 cror 4*cr0+eq,4*cr0+eq,4*cr2+eq 371 cror 4*cr1+eq,4*cr1+eq,4*cr2+eq 373 cror 4*cr0+eq,4*cr0+eq,4*cr3+eq 374 cror 4*cr0+eq,4*cr0+eq,4*cr4+eq 375 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq 376 cror 4*cr0+eq,4*cr0+eq,4*cr5+eq 377 cror 4*cr0+eq,4*cr0+eq,4*cr7+eq 442 cror 4*cr1+eq,4*cr1+eq,4*cr2+eq 444 cror 4*cr0+eq,4*cr0+eq,4*cr3+eq [all …]
|
| /linux-6.15/drivers/pci/controller/ |
| H A D | pcie-iproc-msi.c | 64 unsigned int eq; member 130 unsigned int eq) in iproc_msi_read_reg() argument 139 int eq, u32 val) in iproc_msi_write_reg() argument 334 eq = grp->eq; in iproc_msi_handler() 387 int i, eq; in iproc_msi_enable() local 410 for (eq = 0; eq < msi->nr_irqs; eq++) { in iproc_msi_enable() 422 val |= BIT(eq); in iproc_msi_enable() 430 u32 eq, val; in iproc_msi_disable() local 432 for (eq = 0; eq < msi->nr_irqs; eq++) { in iproc_msi_disable() 435 val &= ~BIT(eq); in iproc_msi_disable() [all …]
|
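The iproc MSI code above enables and disables event queues by setting or clearing one bit per queue in a control register. A trivial standalone model of that read-modify-write loop, with a hypothetical register layout:

```c
#include <stdint.h>

/* Hypothetical model of the loops in iproc_msi_enable() and
 * iproc_msi_disable() above: each event queue owns one bit of a
 * control register, set to enable and cleared to disable.
 */
uint32_t model_msi_set_eqs(uint32_t reg, unsigned int nr_irqs, int enable)
{
	for (unsigned int eq = 0; eq < nr_irqs; eq++) {
		if (enable)
			reg |= 1u << eq;
		else
			reg &= ~(1u << eq);
	}
	return reg;
}
```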
| /linux-6.15/drivers/net/ethernet/ibm/ehea/ |
| H A D | ehea_qmr.c | 236 struct ehea_eq *eq; in ehea_create_eq() local 238 eq = kzalloc(sizeof(*eq), GFP_KERNEL); in ehea_create_eq() 239 if (!eq) in ehea_create_eq() 249 &eq->attr, &eq->fw_handle); in ehea_create_eq() 255 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, in ehea_create_eq() 290 return eq; in ehea_create_eq() 299 kfree(eq); in ehea_create_eq() 322 hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force); in ehea_destroy_eq_res() 329 kfree(eq); in ehea_destroy_eq_res() 337 if (!eq) in ehea_destroy_eq() [all …]
|
| /linux-6.15/include/linux/mlx5/ |
| H A D | eq.h | 24 mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 25 int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, 27 void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, 30 struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc); 31 void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm); 41 static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc) in mlx5_eq_update_cc() argument 44 mlx5_eq_update_ci(eq, cc, 0); in mlx5_eq_update_cc()
|
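These prototypes are the generic EQ interface that other mlx5 consumers build on (the odp.c entry further down uses it for its page-fault EQ). A hedged, kernel-style sketch of the usual drain loop, using only the calls declared above; the wrapper name is made up and this is not a verbatim copy of any driver:

```c
/* Sketch only (relies on the declarations above from
 * include/linux/mlx5/eq.h); the wrapper name is hypothetical.  The
 * loop stops once mlx5_eq_get_eqe() returns NULL, mlx5_eq_update_cc()
 * flushes the running count into the consumer index often enough to
 * avoid an EQ overflow, and the final mlx5_eq_update_ci(..., true)
 * publishes the consumer index and re-arms the EQ.
 */
static void example_drain_eq(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	u32 cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
		/* ... dispatch on eqe->type here ... */
		cc = mlx5_eq_update_cc(eq, ++cc);
	}

	mlx5_eq_update_ci(eq, cc, true);
}
```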
| /linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/en/ |
| H A D | health.c | 52 void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg) in mlx5e_health_eq_diag_fmsg() argument 55 devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn); in mlx5e_health_eq_diag_fmsg() 56 devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn); in mlx5e_health_eq_diag_fmsg() 57 devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx); in mlx5e_health_eq_diag_fmsg() 58 devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index); in mlx5e_health_eq_diag_fmsg() 59 devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core)); in mlx5e_health_eq_diag_fmsg() 131 int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq, in mlx5e_health_channel_eq_recover() argument 137 eq->core.eqn, eq->core.cons_index, eq->core.irqn); in mlx5e_health_channel_eq_recover() 139 eqe_count = mlx5_eq_poll_irq_disabled(eq); in mlx5e_health_channel_eq_recover() 144 eqe_count, eq->core.eqn); in mlx5e_health_channel_eq_recover()
|
| /linux-6.15/drivers/net/ethernet/microsoft/mana/ |
| H A D | gdma_main.c | 291 e.eq.id = qid; in mana_gd_ring_doorbell() 293 e.eq.arm = num_req; in mana_gd_ring_doorbell() 357 u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE); in mana_gd_process_eqe() 395 if (!eq->eq.callback) in mana_gd_process_eqe() 400 eq->eq.callback(eq->eq.context, eq, &event); in mana_gd_process_eqe() 452 eq->head++; in mana_gd_process_eq_events() 457 mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id, in mana_gd_process_eq_events() 513 if (queue == eq) { in mana_gd_deregiser_irq() 615 queue->eq.callback = spec->eq.callback; in mana_gd_create_eq() 616 queue->eq.context = spec->eq.context; in mana_gd_create_eq() [all …]
|
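`mana_gd_process_eqe()` above locates the current entry as `eq->head % (eq->queue_size / GDMA_EQE_SIZE)`, i.e. a free-running count of processed EQEs mapped onto a ring whose size is given in bytes. A standalone illustration with made-up sizes:

```c
#include <stdint.h>
#include <stdio.h>

#define DEMO_EQE_SIZE 16u	/* stand-in for GDMA_EQE_SIZE, demo value */

/* Standalone illustration of the slot calculation in
 * mana_gd_process_eqe() above: head counts processed EQEs, the queue
 * size is in bytes, and the modulo maps the running count onto a slot.
 */
int main(void)
{
	uint32_t queue_size = 8 * DEMO_EQE_SIZE;	/* 8-slot demo ring */

	for (uint32_t head = 0; head < 20; head++)
		printf("head %2u -> slot %u\n", head,
		       head % (queue_size / DEMO_EQE_SIZE));
	return 0;
}
```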
| /linux-6.15/arch/hexagon/lib/ |
| H A D | memset.S | 29 p0 = cmp.eq(r2, #0) 59 p1 = cmp.eq(r2, #1) 72 p1 = cmp.eq(r2, #2) 85 p1 = cmp.eq(r2, #4) 98 p1 = cmp.eq(r3, #1) 114 p1 = cmp.eq(r2, #8) 125 p1 = cmp.eq(r2, #4) 136 p1 = cmp.eq(r2, #2) 180 p1 = cmp.eq(r2, #1) 196 p0 = cmp.eq(r2, #2) [all …]
|
| H A D | memcpy.S | 185 p2 = cmp.eq(len, #0); /* =0 */ 188 p1 = cmp.eq(ptr_in, ptr_out); /* attempt to overwrite self */ 261 p1 = cmp.eq(prolog, #0); 267 nokernel = cmp.eq(kernel,#0); 276 p2 = cmp.eq(kernel, #1); /* skip ovr if kernel == 0 */ 346 nokernel = cmp.eq(kernel, #0); /* after adjustment, recheck */ 367 p3 = cmp.eq(kernel, rest); 436 noepilog = cmp.eq(epilog,#0); 443 p3 = cmp.eq(epilogdws, #0); 455 p3 = cmp.eq(kernel, #0);
|
| /linux-6.15/drivers/clk/spear/ |
| H A D | spear1340_clock.c | 268 {.xscale = 4, .yscale = 25, .eq = 0}, 270 {.xscale = 4, .yscale = 21, .eq = 0}, 272 {.xscale = 5, .yscale = 18, .eq = 0}, 274 {.xscale = 2, .yscale = 6, .eq = 0}, 276 {.xscale = 5, .yscale = 12, .eq = 0}, 278 {.xscale = 2, .yscale = 4, .eq = 0}, 282 {.xscale = 1, .yscale = 3, .eq = 1}, 286 {.xscale = 1, .yscale = 2, .eq = 1}, 359 {.xscale = 1, .yscale = 3, .eq = 0}, 369 {.xscale = 1, .yscale = 4, .eq = 0}, [all …]
|
| H A D | spear1310_clock.c | 253 {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */ 254 {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */ 255 {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */ 256 {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */ 257 {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */ 320 {.xscale = 1, .yscale = 3, .eq = 0}, 325 {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz */ 331 {.xscale = 1, .yscale = 4, .eq = 0}, /* 1.53 MHz */ 332 {.xscale = 1, .yscale = 2, .eq = 0}, /* 3.07 Mhz */ 339 {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */ [all …]
|
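The `xscale`/`yscale`/`eq` triplets in the two SPEAr tables above parameterize auxiliary clock synthesizers; the commented rates are consistent with the usual pair of equations, Fout = Fin * X / Y when `eq` is 1 and half of that when `eq` is 0. A standalone check, assuming a 500 MHz parent (an assumption chosen because it reproduces the 48/83/125/166/250 MHz comments):

```c
#include <stdio.h>

struct aux_rate { unsigned int xscale, yscale, eq; };

/* eq selects between the two usual SPEAr auxiliary-synthesizer
 * equations: Fout = Fin * X / Y when eq is 1, half of that when eq is
 * 0.  The 500 MHz parent is an assumption; it is what reproduces the
 * 48/83/125/166/250 MHz comments in the spear1310_clock.c table.
 */
unsigned long aux_calc_rate(unsigned long parent, struct aux_rate r)
{
	unsigned long rate = parent / r.yscale * r.xscale;

	return r.eq ? rate : rate / 2;
}

int main(void)
{
	const struct aux_rate tbl[] = {
		{4, 21, 0}, {2, 6, 0}, {2, 4, 0}, {1, 3, 1}, {1, 2, 1},
	};
	const unsigned long parent = 500000000UL;	/* assumed parent */

	for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		printf("x=%u y=%u eq=%u -> %lu Hz\n", tbl[i].xscale,
		       tbl[i].yscale, tbl[i].eq, aux_calc_rate(parent, tbl[i]));
	return 0;
}
```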
| /linux-6.15/drivers/infiniband/hw/mlx5/ |
| H A D | odp.c | 1603 struct mlx5_ib_pf_eq *eq = pfault->eq; in mlx5_ib_eqe_pf_action() local 1646 eq->dev, in mlx5_ib_eq_pf_process() 1673 eq->dev, in mlx5_ib_eq_pf_process() 1702 eq->dev, in mlx5_ib_eq_pf_process() 1709 eq->dev, in mlx5_ib_eq_pf_process() 1724 pfault->eq = eq; in mlx5_ib_eq_pf_process() 1784 if (eq->core) in mlx5r_odp_create_eq() 1788 eq->dev = dev; in mlx5r_odp_create_eq() 1800 if (!eq->wq) { in mlx5r_odp_create_eq() 1815 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb); in mlx5r_odp_create_eq() [all …]
|
| /linux-6.15/drivers/infiniband/hw/efa/ |
| H A D | efa_main.c | 110 struct efa_eq *eq = data; in efa_intr_msix_comp() local 111 struct efa_com_dev *edev = eq->eeq.edev; in efa_intr_msix_comp() 113 efa_com_eq_comp_intr_handler(edev, &eq->eeq); in efa_intr_msix_comp() 151 eq->irq.handler = efa_intr_msix_comp; in efa_setup_comp_irq() 152 eq->irq.data = eq; in efa_setup_comp_irq() 153 eq->irq.vector = vector; in efa_setup_comp_irq() 303 efa_com_eq_destroy(&dev->edev, &eq->eeq); in efa_destroy_eq() 304 efa_free_irq(dev, &eq->irq); in efa_destroy_eq() 311 efa_setup_comp_irq(dev, eq, msix_vec); in efa_create_eq() 312 err = efa_request_irq(dev, &eq->irq); in efa_create_eq() [all …]
|
| /linux-6.15/net/dns_resolver/ |
| H A D | dns_key.c | 157 const char *eq; in dns_resolver_preparse() local 168 eq = memchr(opt, '=', opt_len); in dns_resolver_preparse() 169 if (eq) { in dns_resolver_preparse() 170 opt_nlen = eq - opt; in dns_resolver_preparse() 171 eq++; in dns_resolver_preparse() 172 memcpy(optval, eq, next_opt - eq); in dns_resolver_preparse() 173 optval[next_opt - eq] = '\0'; in dns_resolver_preparse()
|
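`dns_resolver_preparse()` above splits each option at '=' with `memchr()`, keeps the length of the name, and copies the remainder into a NUL-terminated value buffer. A standalone model of that split; the option string and buffer size are demo choices:

```c
#include <stdio.h>
#include <string.h>

/* Standalone model of the option parsing in dns_resolver_preparse()
 * above: within [opt, next_opt) find '=', take the bytes before it as
 * the option name and copy what follows into a NUL-terminated value
 * buffer.
 */
int main(void)
{
	const char *opt = "dnserror=6";           /* demo option string */
	const char *next_opt = opt + strlen(opt); /* end of this option */
	size_t opt_len = next_opt - opt;
	char optval[32];

	const char *eq = memchr(opt, '=', opt_len);
	if (eq) {
		size_t opt_nlen = eq - opt;        /* length of the name  */

		eq++;                              /* skip the '='        */
		memcpy(optval, eq, next_opt - eq); /* copy the value ...  */
		optval[next_opt - eq] = '\0';      /* ... and terminate   */
		printf("name len %zu, value \"%s\"\n", opt_nlen, optval);
	}
	return 0;
}
```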
| /linux-6.15/drivers/firmware/broadcom/ |
| H A D | bcm47xx_nvram.c | 187 char *var, *value, *end, *eq; in bcm47xx_nvram_getenv() local 203 eq = strchr(var, '='); in bcm47xx_nvram_getenv() 204 if (!eq) in bcm47xx_nvram_getenv() 206 value = eq + 1; in bcm47xx_nvram_getenv() 207 if (eq - var == strlen(name) && in bcm47xx_nvram_getenv() 208 strncmp(var, name, eq - var) == 0) in bcm47xx_nvram_getenv()
|
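`bcm47xx_nvram_getenv()` above walks `var=value` records and accepts one only when the text before '=' matches the requested name in both length and content. A standalone model with demo data and hypothetical names:

```c
#include <stdio.h>
#include <string.h>

/* Standalone model of the name match in bcm47xx_nvram_getenv() above:
 * walk NUL-separated "var=value" records, split at '=', and accept a
 * record only if the part before '=' has the same length as the
 * requested name and compares equal.
 */
const char *model_getenv(const char *blob, const char *end, const char *name)
{
	const char *var = blob;

	while (var < end && *var) {
		const char *eq = strchr(var, '=');

		if (!eq)
			break;
		if ((size_t)(eq - var) == strlen(name) &&
		    strncmp(var, name, eq - var) == 0)
			return eq + 1;                 /* the value   */
		var += strlen(var) + 1;                /* next record */
	}
	return NULL;
}

int main(void)
{
	const char blob[] = "boardtype=0x0617\0boardrev=0x1102\0";

	printf("%s\n", model_getenv(blob, blob + sizeof(blob), "boardrev"));
	return 0;
}
```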
| /linux-6.15/arch/arc/lib/ |
| H A D | strlen.S | 21 mov.eq r7,r4 24 or.eq r12,r12,r1 38 or.eq r12,r12,r1 57 mov.eq r1,r12 69 mov.eq r2,r6
|
| /linux-6.15/arch/arm64/lib/ |
| H A D | crc32.S | 74 csel x3, x3, x4, eq 75 csel w0, w0, w8, eq 79 csel x3, x3, x4, eq 80 csel w0, w0, w8, eq 84 csel w3, w3, w4, eq 85 csel w0, w0, w8, eq 88 csel w0, w0, w8, eq 92 csel w0, w0, w8, eq
|