Lines matching refs: evp
In InitEvP():
  79    InitEvP(struct ev_pointer *evp, struct ev_base *evb)  (argument)
  81            *evp = evb->dflt_evp;
  82            event_t map = evp->cb_map;
  85            evp->evt->ent[i].ref++;
  92            evp->evt->ent[i].ref++;
In CleanupEvP():
  97    CleanupEvP(struct ev_pointer *evp)  (argument)
  99            EVENT_FOREACH_START(ev, i, BEV_OFFSET, BEV_OFFSET + NUM_BEV, evp->cb_map) {
 100                    evp->evt->ent[i].ref--;
 101            } EVENT_FOREACH_END(ev, evp->cb_map);
 103            if (!evp->cb_map)
 106            EVENT_FOREACH_START(ev, i, UDE_OFFSET, MAX_EV, evp->cb_map) {
 107                    evp->evt->ent[i].ref--;
 108            } EVENT_FOREACH_END(ev, evp->cb_map);
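From the lines above, an ev_pointer starts life as a copy of the base's default pointer and then holds per-event references into its ev_table; CleanupEvP walks the built-in-event range and the user-defined-event range and drops those references again. The following is a minimal sketch of that reference-counting pattern using simplified stand-in types (event_t as a 64-bit mask, fixed-size table, illustrative offsets), not the real mOS definitions:

#include <stdint.h>

typedef uint64_t event_t;              /* one bit per event id (simplified) */

#define MAX_EV        64
#define BEV_OFFSET     0               /* built-in events start at bit 0    */
#define NUM_BEV        8               /* illustrative sizes only           */
#define UDE_OFFSET     NUM_BEV         /* user-defined events follow        */

struct ev_ent     { void *cb; int ref; };
struct ev_table   { event_t map; struct ev_ent ent[MAX_EV]; };
struct ev_pointer { event_t cb_map, ft_map; struct ev_table *evt; };
struct ev_base    { struct ev_pointer dflt_evp; };

/* Copy the base's default pointer, then take a reference on every table
 * entry that this pointer's cb_map says it will use.  The real code walks
 * the built-in and user-defined ranges in two separate loops. */
static void
InitEvP(struct ev_pointer *evp, struct ev_base *evb)
{
        int i;

        *evp = evb->dflt_evp;
        event_t map = evp->cb_map;

        for (i = 0; i < MAX_EV; i++)
                if (map & ((event_t)1 << i))
                        evp->evt->ent[i].ref++;
}

/* Drop the references taken in InitEvP().  The real code iterates with
 * EVENT_FOREACH_START/END over [BEV_OFFSET, BEV_OFFSET + NUM_BEV) and
 * [UDE_OFFSET, MAX_EV), short-circuiting when cb_map is empty. */
static void
CleanupEvP(struct ev_pointer *evp)
{
        int i;

        for (i = 0; i < MAX_EV; i++)
                if (evp->cb_map & ((event_t)1 << i))
                        evp->evt->ent[i].ref--;
}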
In RegCbWCpy():
 136    RegCbWCpy(struct ev_pointer *evp, struct ev_table *new_evt,  (argument)
 143            struct ev_table *cur_evt = evp->evt;
 147            assert(evp->evt != new_evt);
 148            assert(!(evp->cb_map & events));
 149            assert((evp->cb_map & cur_evt->map) == evp->cb_map);
 152            ev_total = events | evp->cb_map;
 153            evcpy = evp->cb_map & ~new_evt->map;
 154            evp->evt = new_evt;
 156            ev_inc_ref = events | evp->cb_map;
 157            ev_dec_ref = evp->cb_map;
 160            evp->cb_map |= events;
 199            evp->ft_map |= g_udes[i - UDE_OFFSET].ancestors;
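RegCbWCpy registers new events while moving the pointer to a different event table: it asserts the events are not already registered, computes which already-registered callbacks are missing from the new table (evcpy, line 153) so they can be carried over, swaps evp->evt, adjusts reference counts on both tables, and finally ORs the new events into cb_map. A hedged sketch of that flow follows, with the copy and refcount loops collapsed into one bit scan over the same simplified types as above; the exact ordering and the handling of ev_total differ in the real function:

#include <assert.h>
#include <stdint.h>

typedef uint64_t event_t;
#define MAX_EV 64

struct ev_ent     { void *cb; int ref; };
struct ev_table   { event_t map; struct ev_ent ent[MAX_EV]; };
struct ev_pointer { event_t cb_map, ft_map; struct ev_table *evt; };

static void
RegCbWCpy(struct ev_pointer *evp, struct ev_table *new_evt,
          event_t events, void *cb)
{
        struct ev_table *cur_evt = evp->evt;
        event_t evcpy, ev_inc_ref, ev_dec_ref;
        int i;

        assert(evp->evt != new_evt);
        assert(!(evp->cb_map & events));                /* no double registration */
        assert((evp->cb_map & cur_evt->map) == evp->cb_map);

        evcpy = evp->cb_map & ~new_evt->map;            /* callbacks to carry over */
        evp->evt = new_evt;

        ev_inc_ref = events | evp->cb_map;              /* refs gained on new table */
        ev_dec_ref = evp->cb_map;                       /* refs released on old one */

        for (i = 0; i < MAX_EV; i++) {
                event_t bit = (event_t)1 << i;

                if (evcpy & bit)                        /* carry existing callback  */
                        new_evt->ent[i].cb = cur_evt->ent[i].cb;
                if (events & bit)                       /* install the new callback */
                        new_evt->ent[i].cb = cb;
                if (ev_inc_ref & bit)
                        new_evt->ent[i].ref++;
                if (ev_dec_ref & bit)
                        cur_evt->ent[i].ref--;
        }
        new_evt->map |= evcpy | events;
        evp->cb_map |= events;
        /* the real function also folds UDE ancestor bits into evp->ft_map
         * (line 199); that step is sketched separately below */
}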
In RegCbWoCpy():
 219    RegCbWoCpy(struct ev_pointer *evp, event_t events, void *cb)  (argument)
 224            struct ev_table *cur_evt = evp->evt;
 228            assert(!(evp->cb_map & events));
 229            assert((evp->cb_map & cur_evt->map) == evp->cb_map);
 234            evp->cb_map |= events;
 266            evp->ft_map |= g_udes[i - UDE_OFFSET].ancestors;
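RegCbWoCpy installs callbacks into the current table in place, and both registration paths end the same way: for every user-defined event being registered, the UDE's ancestor mask is folded into evp->ft_map (lines 199 and 266), so filter events on the ancestor chain keep reaching this pointer. A small sketch of that ancestor propagation, assuming a g_udes[] array that records each UDE's ancestors as a bitmask (the real layout may differ):

#include <stdint.h>

typedef uint64_t event_t;

#define MAX_EV      64
#define UDE_OFFSET   8                  /* illustrative: UDEs start at bit 8 */

struct ude_desc { event_t ancestors; int parent; };

static struct ude_desc g_udes[MAX_EV - UDE_OFFSET];

struct ev_pointer { event_t cb_map, ft_map; };

/* After OR-ing the newly registered events into cb_map, pull the ancestor
 * bits of every user-defined event among them into ft_map. */
static void
PropagateUdeAncestors(struct ev_pointer *evp, event_t events)
{
        int i;

        evp->cb_map |= events;

        for (i = UDE_OFFSET; i < MAX_EV; i++)
                if (events & ((event_t)1 << i))
                        evp->ft_map |= g_udes[i - UDE_OFFSET].ancestors;
}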
In UnregCb():
 279    UnregCb(struct ev_pointer *evp, event_t events)  (argument)
 281            assert(evp);
 283            struct ev_table *evt = evp->evt;
 284            evp->cb_map &= ~events;
 294                    !(evid & evp->cb_map)
 296                    && !(g_descendants[walk] & evp->cb_map)) {
 298                            evp->ft_map &= ~(1L << g_udes[walk - UDE_OFFSET].parent);
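UnregCb clears the events from cb_map and then prunes ft_map: for each unregistered user-defined event, the parent's bit is dropped when neither the event itself nor any of its registered descendants remain (lines 294-298). A sketch of that pruning step over simplified globals (g_descendants[] as a per-event descendant mask, g_udes[].parent as the parent event id); the table-side reference drop is not shown in the excerpt and is omitted here:

#include <assert.h>
#include <stdint.h>

typedef uint64_t event_t;

#define MAX_EV      64
#define UDE_OFFSET   8

struct ude_desc { int parent; };

static struct ude_desc g_udes[MAX_EV - UDE_OFFSET];
static event_t g_descendants[MAX_EV];   /* per-event mask of its descendants */

struct ev_pointer { event_t cb_map, ft_map; };

static void
UnregCb(struct ev_pointer *evp, event_t events)
{
        int walk;

        assert(evp);
        evp->cb_map &= ~events;

        /* For each unregistered UDE, drop the parent's filter bit if neither
         * the event nor any of its descendants is still registered. */
        for (walk = UDE_OFFSET; walk < MAX_EV; walk++) {
                event_t evid = (event_t)1 << walk;

                if (!(events & evid))
                        continue;
                if (!(evid & evp->cb_map) &&
                    !(g_descendants[walk] & evp->cb_map))
                        evp->ft_map &= ~((event_t)1 << g_udes[walk - UDE_OFFSET].parent);
        }
}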
In FindReusableEvT():
 332    FindReusableEvT(struct ev_pointer *evp, struct ev_base *evb,  (argument)
 335            struct ev_table *cur_evt = evp->evt;
 338            assert((evp->cb_map & cur_evt->map) == evp->cb_map);
 341            event_t overlap = evp->cb_map & walk->map;
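FindReusableEvT scans the base's existing event tables for one that can host both the callbacks this pointer already has and the new ones: the asserted invariant is that cb_map is a subset of the current table's map (line 338), and the search inspects how each candidate overlaps with cb_map (line 341). The sketch below assumes the tables hang off the base in a simple array and that a candidate is reusable when its overlapping entries hold the same callbacks as the current table and the slots for the new events are free or already hold the requested callback; the real criterion may differ:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t event_t;
#define MAX_EV     64
#define MAX_EVT     8                   /* illustrative table pool size */

struct ev_ent     { void *cb; int ref; };
struct ev_table   { event_t map; struct ev_ent ent[MAX_EV]; };
struct ev_pointer { event_t cb_map, ft_map; struct ev_table *evt; };
struct ev_base    { struct ev_table evt_pool[MAX_EVT]; int num_evt; };

static struct ev_table *
FindReusableEvT(struct ev_pointer *evp, struct ev_base *evb,
                event_t events, void *cb)
{
        struct ev_table *cur_evt = evp->evt;
        int t, i;

        assert((evp->cb_map & cur_evt->map) == evp->cb_map);

        for (t = 0; t < evb->num_evt; t++) {
                struct ev_table *walk = &evb->evt_pool[t];
                event_t overlap = evp->cb_map & walk->map;
                int ok = 1;

                for (i = 0; i < MAX_EV && ok; i++) {
                        event_t bit = (event_t)1 << i;

                        /* entries we already use must match our callbacks ... */
                        if ((overlap & bit) &&
                            walk->ent[i].cb != cur_evt->ent[i].cb)
                                ok = 0;
                        /* ... and slots for the new events must be free or
                         * already hold the requested callback.              */
                        if ((events & bit) &&
                            (walk->map & bit) && walk->ent[i].cb != cb)
                                ok = 0;
                }
                if (ok)
                        return walk;
        }
        return NULL;                    /* caller must fall back to a new table */
}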
In ModCb():
 378    ModCb(mtcp_manager_t mtcp, int op, struct ev_pointer *evp, struct ev_base *evb,  (argument)
 381            struct ev_table *evt = evp->evt;
 388            if (events & evp->cb_map) {
 395            if (!(nevt = FindReusableEvT(evp, evb, events, cb))) {
 403            RegCbWCpy(evp, nevt, events, cb);
 405            RegCbWoCpy(evp, events, cb);
 409            RegCbWoCpy(evp, events, cb);
 412            if ((events & evp->cb_map) != events) {
 418            UnregCb(evp, events);
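ModCb ties the pieces together: on an add it rejects events already present in cb_map (line 388), looks for a table that can take the callbacks (line 395), and then registers via the copying or the in-place path; on a remove it verifies that all the events are actually registered (line 412) before calling UnregCb. The exact branch structure is not fully visible in the excerpt (there are two in-place paths at lines 405 and 409), so the sketch below assumes the simplest policy: allocate and copy when no existing table fits, copy when a different table fits, register in place when the current table fits. AllocEvT and the CB_ADD/CB_REMOVE codes are stand-ins; the real function also takes an mtcp context:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t event_t;

struct ev_table;                        /* opaque here; see earlier sketches */
struct ev_base;
struct ev_pointer { event_t cb_map, ft_map; struct ev_table *evt; };

/* Helpers sketched earlier (or their real counterparts). */
struct ev_table *FindReusableEvT(struct ev_pointer *evp, struct ev_base *evb,
                                 event_t events, void *cb);
struct ev_table *AllocEvT(struct ev_base *evb);         /* hypothetical */
void RegCbWCpy(struct ev_pointer *evp, struct ev_table *new_evt,
               event_t events, void *cb);
void RegCbWoCpy(struct ev_pointer *evp, event_t events, void *cb);
void UnregCb(struct ev_pointer *evp, event_t events);

enum { CB_ADD, CB_REMOVE };             /* stand-ins for the real op codes */

static int
ModCb(int op, struct ev_pointer *evp, struct ev_base *evb,
      event_t events, void *cb)
{
        if (op == CB_ADD) {
                struct ev_table *nevt;

                if (events & evp->cb_map)       /* already registered */
                        return -EEXIST;

                if (!(nevt = FindReusableEvT(evp, evb, events, cb))) {
                        /* no existing table fits: take a fresh one and
                         * migrate the current callbacks onto it           */
                        if ((nevt = AllocEvT(evb)) == NULL)
                                return -ENOMEM;
                        RegCbWCpy(evp, nevt, events, cb);
                } else if (nevt != evp->evt) {
                        RegCbWCpy(evp, nevt, events, cb);
                } else {
                        RegCbWoCpy(evp, events, cb);
                }
                return 0;
        }

        /* removal: every event being dropped must currently be registered */
        if ((events & evp->cb_map) != events)
                return -ENOENT;
        UnregCb(evp, events);
        return 0;
}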
In ModifyCallback():
 430            struct ev_pointer *evp;  (local)
 451            evp = &socket->monitor_stream->dontcare_evp;
 454            evp = &socket->monitor_stream->pre_tcp_evp;
 457            evp = &socket->monitor_stream->post_tcp_evp;
 473            evp = &evb->dflt_evp;
 479            return ModCb(mtcp, op, evp, evb, events, callback);
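ModifyCallback first resolves which ev_pointer the operation targets: for a monitored stream it picks the dontcare, pre-TCP, or post-TCP pointer by hook point (lines 451-457), otherwise it falls back to the base's default pointer (line 473), and then forwards everything to ModCb. A stripped-down selector, with the hook identifiers and stream layout reduced to stand-ins for the real mOS API:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t event_t;
struct ev_table;                        /* opaque here */
struct ev_pointer { event_t cb_map, ft_map; struct ev_table *evt; };

/* Stand-in for the per-stream monitor state: one ev_pointer per hook. */
struct mon_stream {
        struct ev_pointer dontcare_evp;
        struct ev_pointer pre_tcp_evp;
        struct ev_pointer post_tcp_evp;
};

struct ev_base { struct ev_pointer dflt_evp; };

/* Illustrative hook identifiers; the real ones come from the mOS API. */
enum hook_point { HOOK_DONTCARE, HOOK_PRE_TCP, HOOK_POST_TCP };

static struct ev_pointer *
PickEvPointer(struct mon_stream *ms, struct ev_base *evb, enum hook_point hook)
{
        if (ms == NULL)                 /* not a per-stream monitor socket */
                return &evb->dflt_evp;

        switch (hook) {
        case HOOK_DONTCARE: return &ms->dontcare_evp;
        case HOOK_PRE_TCP:  return &ms->pre_tcp_evp;
        case HOOK_POST_TCP: return &ms->post_tcp_evp;
        }
        return NULL;
}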
In HandleCallback():
 624            struct ev_pointer * const evp =  (local)
 636            if (!evp || !((cb_map = events & evp->cb_map) || (g_ude_map & evp->cb_map)))
 659            ft_map = events & evp->ft_map;
 662            struct ev_table * const evt = evp->evt;
 679            cb_map |= descendants & evp->cb_map;
 680            ft_map |= descendants & evp->ft_map;
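On the delivery side, HandleCallback resolves the stream's ev_pointer, bails out early when no raised event intersects cb_map and no UDE is registered (line 636), then builds two working masks: cb_map for events with callbacks and ft_map for filter events, widens both with the registered descendants of each raised event (lines 679-680), and finally invokes the callbacks from evp->evt. A sketch of that masking-and-dispatch step; the callback signature, the descendant table, and the way evp is resolved from the stream are simplified placeholders:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t event_t;
typedef void (*ev_cb_t)(int sock, event_t ev);   /* simplified callback type */

#define MAX_EV 64

struct ev_ent     { ev_cb_t cb; int ref; };
struct ev_table   { event_t map; struct ev_ent ent[MAX_EV]; };
struct ev_pointer { event_t cb_map, ft_map; struct ev_table *evt; };

static event_t g_ude_map;               /* mask of all registered UDEs       */
static event_t g_descendants[MAX_EV];   /* per-event mask of its descendants */

static void
HandleCallback(int sock, struct ev_pointer *evp, event_t events)
{
        event_t cb_map, ft_map;
        int i;

        /* Nothing to do if no raised event has a callback here and no
         * user-defined event is registered on this pointer.            */
        if (!evp ||
            !((cb_map = events & evp->cb_map) || (g_ude_map & evp->cb_map)))
                return;

        ft_map = events & evp->ft_map;
        struct ev_table * const evt = evp->evt;

        /* Widen both masks with the registered descendants of each
         * raised event before dispatching.                             */
        for (i = 0; i < MAX_EV; i++) {
                event_t descendants;

                if (!(events & ((event_t)1 << i)))
                        continue;
                descendants = g_descendants[i];
                cb_map |= descendants & evp->cb_map;
                ft_map |= descendants & evp->ft_map;
        }

        (void)ft_map;   /* in the real code this gates UDE filter evaluation */

        for (i = 0; i < MAX_EV; i++)
                if ((cb_map & ((event_t)1 << i)) && evt->ent[i].cb)
                        evt->ent[i].cb(sock, (event_t)1 << i);
}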