1*76404edcSAsim Jamshed #define __MOS_CORE_
2*76404edcSAsim Jamshed
3*76404edcSAsim Jamshed #ifndef NEWEV
4*76404edcSAsim Jamshed
5*76404edcSAsim Jamshed /* NOTE TODO:
6*76404edcSAsim Jamshed * We can improve performance and reduce memory usage by making MOS_NULL
7*76404edcSAsim Jamshed * hook and MOS_HK_RCV share event structures since no event can be registered
8*76404edcSAsim Jamshed * to both hooks. */
9*76404edcSAsim Jamshed
10*76404edcSAsim Jamshed #include <stdio.h>
11*76404edcSAsim Jamshed #include <stdlib.h>
12*76404edcSAsim Jamshed #include <stdint.h>
13*76404edcSAsim Jamshed #include <errno.h>
14*76404edcSAsim Jamshed #include <assert.h>
15*76404edcSAsim Jamshed #include <string.h>
16*76404edcSAsim Jamshed
17*76404edcSAsim Jamshed #include "mtcp.h"
18*76404edcSAsim Jamshed #include "event_callback.h"
19*76404edcSAsim Jamshed #include "debug.h"
20*76404edcSAsim Jamshed #include "mos_api.h"
21*76404edcSAsim Jamshed #include "mtcp_api.h"
22*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
23*76404edcSAsim Jamshed enum {
24*76404edcSAsim Jamshed OP_REG,
25*76404edcSAsim Jamshed OP_UNREG,
26*76404edcSAsim Jamshed };
27*76404edcSAsim Jamshed
28*76404edcSAsim Jamshed /* macros */
29*76404edcSAsim Jamshed #define IS_PO2(x) (!((x - 1) & (x)))
30*76404edcSAsim Jamshed #define NUM_UDE (MAX_EV - UDE_OFFSET)
31*76404edcSAsim Jamshed #define EVENT_FOREACH_START(ev, idx, from, to, map) \
32*76404edcSAsim Jamshed do {int idx; for (idx = (from); idx < (to); idx++) { \
33*76404edcSAsim Jamshed const event_t ev = 1L << idx; if (!((map) & (ev))) continue;
34*76404edcSAsim Jamshed #define EVENT_FOREACH_END(ev, map) \
35*76404edcSAsim Jamshed if (!((map) &= ~ev)) break;}} while (0)
36*76404edcSAsim Jamshed
37*76404edcSAsim Jamshed /* Global variables (per CPU core) */
38*76404edcSAsim Jamshed static struct {
39*76404edcSAsim Jamshed filter_t ft;
40*76404edcSAsim Jamshed event_t ancestors;
41*76404edcSAsim Jamshed int8_t parent;
42*76404edcSAsim Jamshed } g_udes[NUM_UDE];
43*76404edcSAsim Jamshed static event_t g_descendants[MAX_EV];
44*76404edcSAsim Jamshed static event_t g_ude_map;
45*76404edcSAsim Jamshed static const event_t g_bev_map = (~(((event_t)-1) << NUM_BEV)) << BEV_OFFSET;
46*76404edcSAsim Jamshed static int8_t g_ude_id[MAX_EV][NUM_UDE];
47*76404edcSAsim Jamshed
48*76404edcSAsim Jamshed /* FIXME: ft_map is not managed properly (especially in UnregCb()). */
49*76404edcSAsim Jamshed
50*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
51*76404edcSAsim Jamshed void
GlobInitEvent(void)52*76404edcSAsim Jamshed GlobInitEvent(void)
53*76404edcSAsim Jamshed {
54*76404edcSAsim Jamshed int i;
55*76404edcSAsim Jamshed for (i = 0; i < NUM_UDE; i++) {
56*76404edcSAsim Jamshed g_udes[i].ft = NULL;
57*76404edcSAsim Jamshed g_udes[i].ancestors = 0;
58*76404edcSAsim Jamshed g_udes[i].parent = -1;
59*76404edcSAsim Jamshed }
60*76404edcSAsim Jamshed
61*76404edcSAsim Jamshed for (i = 0; i < MAX_EV; i++)
62*76404edcSAsim Jamshed g_descendants[i] = 0;
63*76404edcSAsim Jamshed
64*76404edcSAsim Jamshed memset(g_ude_id, -1, MAX_EV * NUM_UDE);
65*76404edcSAsim Jamshed g_ude_map = 0;
66*76404edcSAsim Jamshed }
67*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
68*76404edcSAsim Jamshed void
InitEvent(mtcp_manager_t mtcp,int num_evt)69*76404edcSAsim Jamshed InitEvent(mtcp_manager_t mtcp, int num_evt)
70*76404edcSAsim Jamshed {
71*76404edcSAsim Jamshed if (!(mtcp->evt_pool = MPCreate(sizeof(struct ev_table),
72*76404edcSAsim Jamshed sizeof(struct ev_table) * num_evt, 0))) {
73*76404edcSAsim Jamshed TRACE_ERROR("Failed to allocate ev_table pool\n");
74*76404edcSAsim Jamshed exit(0);
75*76404edcSAsim Jamshed }
76*76404edcSAsim Jamshed }
77*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
/* Initialize an event pointer from its base's default event pointer.
 * The new evp shares the default event table, so one reference is added
 * per inherited callback (mirrored by CleanupEvP()).
 * Note: EVENT_FOREACH consumes the local copy `map`, not evp->cb_map. */
void
InitEvP(struct ev_pointer *evp, struct ev_base *evb)
{
	*evp = evb->dflt_evp;
	event_t map = evp->cb_map;

	/* built-in events inherited from the default evp */
	EVENT_FOREACH_START(ev, i, BEV_OFFSET, BEV_OFFSET + NUM_BEV, map) {
		evp->evt->ent[i].ref++;
	} EVENT_FOREACH_END(ev, map);

	/* `map` fully consumed: no UDE callbacks to account for */
	if (!map)
		return;

	/* user-defined events inherited from the default evp */
	EVENT_FOREACH_START(ev, i, UDE_OFFSET, MAX_EV, map) {
		evp->evt->ent[i].ref++;
	} EVENT_FOREACH_END(ev, map);
}
95*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
/* Drop the event-table references held by an event pointer that is being
 * torn down; mirrors InitEvP().  EVENT_FOREACH consumes evp->cb_map in
 * the process, so the pointer ends up with an empty callback map. */
void
CleanupEvP(struct ev_pointer *evp)
{
	/* built-in events */
	EVENT_FOREACH_START(ev, i, BEV_OFFSET, BEV_OFFSET + NUM_BEV, evp->cb_map) {
		evp->evt->ent[i].ref--;
	} EVENT_FOREACH_END(ev, evp->cb_map);

	/* cb_map fully consumed: no UDE callbacks remain */
	if (!evp->cb_map)
		return;

	/* user-defined events */
	EVENT_FOREACH_START(ev, i, UDE_OFFSET, MAX_EV, evp->cb_map) {
		evp->evt->ent[i].ref--;
	} EVENT_FOREACH_END(ev, evp->cb_map);
}
110*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
111*76404edcSAsim Jamshed void
InitEvB(mtcp_manager_t mtcp,struct ev_base * evb)112*76404edcSAsim Jamshed InitEvB(mtcp_manager_t mtcp, struct ev_base *evb)
113*76404edcSAsim Jamshed {
114*76404edcSAsim Jamshed TAILQ_INIT(&evb->evth);
115*76404edcSAsim Jamshed struct ev_table *dflt_evt = MPAllocateChunk(mtcp->evt_pool);
116*76404edcSAsim Jamshed memset(dflt_evt, 0, sizeof(struct ev_table));
117*76404edcSAsim Jamshed
118*76404edcSAsim Jamshed TAILQ_INSERT_HEAD(&evb->evth, dflt_evt, link);
119*76404edcSAsim Jamshed evb->dflt_evp.cb_map = 0;
120*76404edcSAsim Jamshed evb->dflt_evp.ft_map = 0;
121*76404edcSAsim Jamshed evb->dflt_evp.evt = dflt_evt;
122*76404edcSAsim Jamshed }
123*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
124*76404edcSAsim Jamshed void
CleanupEvB(mtcp_manager_t mtcp,struct ev_base * evb)125*76404edcSAsim Jamshed CleanupEvB(mtcp_manager_t mtcp, struct ev_base *evb)
126*76404edcSAsim Jamshed {
127*76404edcSAsim Jamshed struct ev_table *walk, *tmp;
128*76404edcSAsim Jamshed for (walk = TAILQ_FIRST(&evb->evth); walk != NULL; walk = tmp) {
129*76404edcSAsim Jamshed tmp = TAILQ_NEXT(walk, link);
130*76404edcSAsim Jamshed
131*76404edcSAsim Jamshed MPFreeChunk(mtcp->evt_pool, walk);
132*76404edcSAsim Jamshed }
133*76404edcSAsim Jamshed }
134*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
135*76404edcSAsim Jamshed static inline void
RegCbWCpy(struct ev_pointer * evp,struct ev_table * new_evt,event_t events,void * cb)136*76404edcSAsim Jamshed RegCbWCpy(struct ev_pointer *evp, struct ev_table *new_evt,
137*76404edcSAsim Jamshed event_t events, void *cb)
138*76404edcSAsim Jamshed {
139*76404edcSAsim Jamshed /* NOTE: We may apply binary search which is O(log(N)) later, while current
140*76404edcSAsim Jamshed * linear search is O(N). */
141*76404edcSAsim Jamshed event_t evcpy = 0, ev_total;
142*76404edcSAsim Jamshed event_t ev_inc_ref = 0, ev_dec_ref = 0;
143*76404edcSAsim Jamshed struct ev_table *cur_evt = evp->evt;
144*76404edcSAsim Jamshed
145*76404edcSAsim Jamshed event_t overlap = events & new_evt->map;
146*76404edcSAsim Jamshed
147*76404edcSAsim Jamshed assert(evp->evt != new_evt);
148*76404edcSAsim Jamshed assert(!(evp->cb_map & events));
149*76404edcSAsim Jamshed assert((evp->cb_map & cur_evt->map) == evp->cb_map);
150*76404edcSAsim Jamshed
151*76404edcSAsim Jamshed /* event table will be changed to new_evt */
152*76404edcSAsim Jamshed ev_total = events | evp->cb_map;
153*76404edcSAsim Jamshed evcpy = evp->cb_map & ~new_evt->map;
154*76404edcSAsim Jamshed evp->evt = new_evt;
155*76404edcSAsim Jamshed
156*76404edcSAsim Jamshed ev_inc_ref = events | evp->cb_map;
157*76404edcSAsim Jamshed ev_dec_ref = evp->cb_map;
158*76404edcSAsim Jamshed
159*76404edcSAsim Jamshed new_evt->map |= ev_total;
160*76404edcSAsim Jamshed evp->cb_map |= events;
161*76404edcSAsim Jamshed
162*76404edcSAsim Jamshed /* For built-in events */
163*76404edcSAsim Jamshed EVENT_FOREACH_START(ev, i, BEV_OFFSET, BEV_OFFSET + NUM_BEV, ev_total) {
164*76404edcSAsim Jamshed if (events & ev) {
165*76404edcSAsim Jamshed assert((ev & overlap) ? new_evt->ent[i].cb == cb
166*76404edcSAsim Jamshed : new_evt->ent[i].ref == 0);
167*76404edcSAsim Jamshed if (!(ev & overlap))
168*76404edcSAsim Jamshed new_evt->ent[i].cb = cb;
169*76404edcSAsim Jamshed } else if (evcpy & ev) {
170*76404edcSAsim Jamshed assert(new_evt && new_evt != cur_evt);
171*76404edcSAsim Jamshed new_evt->ent[i].cb = cur_evt->ent[i].cb;
172*76404edcSAsim Jamshed }
173*76404edcSAsim Jamshed
174*76404edcSAsim Jamshed /* ev_dec_ref is subset of ev_inc_ref */
175*76404edcSAsim Jamshed if (ev_inc_ref & ev) {
176*76404edcSAsim Jamshed new_evt->ent[i].ref++;
177*76404edcSAsim Jamshed if (!(new_evt->map & ev))
178*76404edcSAsim Jamshed new_evt->map |= ev;
179*76404edcSAsim Jamshed if (ev_dec_ref & ev) {
180*76404edcSAsim Jamshed if (--cur_evt->ent[i].ref)
181*76404edcSAsim Jamshed cur_evt->map &= ~ev;
182*76404edcSAsim Jamshed }
183*76404edcSAsim Jamshed }
184*76404edcSAsim Jamshed } EVENT_FOREACH_END(ev, ev_total);
185*76404edcSAsim Jamshed
186*76404edcSAsim Jamshed if (!ev_total)
187*76404edcSAsim Jamshed return;
188*76404edcSAsim Jamshed
189*76404edcSAsim Jamshed /* For UDEs */
190*76404edcSAsim Jamshed EVENT_FOREACH_START(ev, i, UDE_OFFSET, MAX_EV, ev_total) {
191*76404edcSAsim Jamshed if (events & ev) {
192*76404edcSAsim Jamshed assert((ev & overlap) ? new_evt->ent[i].cb == cb
193*76404edcSAsim Jamshed : new_evt->ent[i].ref == 0);
194*76404edcSAsim Jamshed if (!(ev & overlap))
195*76404edcSAsim Jamshed new_evt->ent[i].cb = cb;
196*76404edcSAsim Jamshed /* update ft_map */
197*76404edcSAsim Jamshed assert(g_udes[i - UDE_OFFSET].ft);
198*76404edcSAsim Jamshed assert(g_udes[i - UDE_OFFSET].ancestors);
199*76404edcSAsim Jamshed evp->ft_map |= g_udes[i - UDE_OFFSET].ancestors;
200*76404edcSAsim Jamshed } else if (evcpy & ev) {
201*76404edcSAsim Jamshed assert(new_evt && new_evt != cur_evt);
202*76404edcSAsim Jamshed new_evt->ent[i].cb = cur_evt->ent[i].cb;
203*76404edcSAsim Jamshed }
204*76404edcSAsim Jamshed
205*76404edcSAsim Jamshed /* ev_dec_ref is subset of ev_inc_ref */
206*76404edcSAsim Jamshed if (ev_inc_ref & ev) {
207*76404edcSAsim Jamshed new_evt->ent[i].ref++;
208*76404edcSAsim Jamshed if (!(new_evt->map & ev))
209*76404edcSAsim Jamshed new_evt->map |= ev;
210*76404edcSAsim Jamshed if (ev_dec_ref & ev) {
211*76404edcSAsim Jamshed if (--cur_evt->ent[i].ref)
212*76404edcSAsim Jamshed cur_evt->map &= ~ev;
213*76404edcSAsim Jamshed }
214*76404edcSAsim Jamshed }
215*76404edcSAsim Jamshed } EVENT_FOREACH_END(ev, ev_total);
216*76404edcSAsim Jamshed }
217*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
/*
 * Register callback `cb` for `events` on `evp` without switching event
 * tables: the current table has no conflicting entries, so the new
 * callbacks are written in place and only reference counts grow.
 */
static inline void
RegCbWoCpy(struct ev_pointer *evp, event_t events, void *cb)
{
	/* NOTE: We may apply binary search which is O(log(N)) later, while current
	 * linear search is O(N). */
	event_t ev_inc_ref = 0;
	struct ev_table *cur_evt = evp->evt;

	/* events already in the table (registered via another evp) */
	event_t overlap = events & cur_evt->map;

	assert(!(evp->cb_map & events));
	assert((evp->cb_map & cur_evt->map) == evp->cb_map);

	ev_inc_ref = events;

	cur_evt->map |= events;
	evp->cb_map |= events;

	/* For built-in events.  EVENT_FOREACH consumes `events` itself. */
	EVENT_FOREACH_START(ev, i, BEV_OFFSET, BEV_OFFSET + NUM_BEV, events) {
		if (events & ev) {
			/* an overlapping slot must already hold the same cb;
			 * a fresh slot must be unreferenced */
			assert((ev & overlap) ? cur_evt->ent[i].cb == cb
			       : cur_evt->ent[i].ref == 0);
			if (!(ev & overlap))
				cur_evt->ent[i].cb = cb;
		}

		/* ev_dec_ref is subset of ev_inc_ref */
		if (ev_inc_ref & ev) {
			cur_evt->ent[i].ref++;
			/* NOTE(review): `ev` was OR-ed into cur_evt->map
			 * above, so this branch looks unreachable -- confirm */
			if (!(cur_evt->map & ev))
				cur_evt->map |= ev;
		}
	} EVENT_FOREACH_END(ev, events);

	/* `events` fully consumed: no UDE bits left */
	if (!events)
		return;

	/* For UDEs */
	EVENT_FOREACH_START(ev, i, UDE_OFFSET, MAX_EV, events) {
		if (events & ev) {
			assert((ev & overlap) ? cur_evt->ent[i].cb == cb
			       : cur_evt->ent[i].ref == 0);
			if (!(ev & overlap))
				cur_evt->ent[i].cb = cb;
			/* update ft_map: enable the filters of every
			 * ancestor of this UDE on the pointer */
			assert(g_udes[i - UDE_OFFSET].ft);
			assert(g_udes[i - UDE_OFFSET].ancestors);
			evp->ft_map |= g_udes[i - UDE_OFFSET].ancestors;
		}

		/* ev_dec_ref is subset of ev_inc_ref */
		if (ev_inc_ref & ev) {
			cur_evt->ent[i].ref++;
			if (!(cur_evt->map & ev))
				cur_evt->map |= ev;
		}
	} EVENT_FOREACH_END(ev, events);
}
277*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
/*
 * Unregister the callbacks for `events` from `evp`: drop one reference per
 * event on the backing event table (clearing table-map bits whose last
 * reference disappears) and walk up each affected UDE's parent chain to
 * disable filters no remaining callback needs.
 */
static inline void
UnregCb(struct ev_pointer *evp, event_t events)
{
	assert(evp);

	struct ev_table *evt = evp->evt;
	evp->cb_map &= ~events;

	/* Unregister unnecessary UDEs */
	if (events & g_ude_map) {
		event_t evs = events & g_ude_map;
		EVENT_FOREACH_START(ev, i, UDE_OFFSET, MAX_EV, evs) {
			int walk = i;
			while (1) {
				const event_t evid = 1L << walk;
				if (/* no registered callback */
					!(evid & evp->cb_map)
					/* no child events */
					&& !(g_descendants[walk] & evp->cb_map)) {
					/* this UDE filter is useless */
					evp->ft_map &= ~(1L << g_udes[walk - UDE_OFFSET].parent);
					/* No need to see this event in rest of EVENT_FOREACH */
					evs &= ~evid;
					/* keep pruning at the parent; stop once the
					 * chain reaches a built-in event */
					if ((walk = g_udes[walk - UDE_OFFSET].parent) < UDE_OFFSET)
						break;
				} else
					break;
			}
		} EVENT_FOREACH_END(ev, evs);
	}

	/* Placing reference counter for each event table entry, instead of each
	 * event table, and decrement them for every callback unregistration may
	 * look inefficient. However, actually, it does NOT. If reference counter
	 * is for each event table, then we need to call FindReusableEvT() for
	 * every callback unregistration to find reusable event table.
	 * FindReusableEvT() is heavier than per-event reference counter update.
	 * And that way also wastes memory. */

	/* built-in events: drop references; EVENT_FOREACH consumes `events` */
	EVENT_FOREACH_START(ev, i, BEV_OFFSET, BEV_OFFSET + NUM_BEV, events) {
		if (--evt->ent[i].ref == 0)
			evt->map &= ~ev;
	} EVENT_FOREACH_END(ev, events);

	if (!events)
		return;

	/* user-defined events */
	EVENT_FOREACH_START(ev, i, UDE_OFFSET, MAX_EV, events) {
		if (--evt->ent[i].ref == 0)
			evt->map &= ~ev;
	} EVENT_FOREACH_END(ev, events);
}
330*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
/*
 * Search the event base for an existing event table that can serve `evp`
 * after registering `cb` for `events`: for every requested event the
 * candidate must already carry `cb` (or pass the assertions in RegCb*),
 * and for every event evp currently holds the candidate must carry the
 * same callback as evp's current table.  Returns the reusable table,
 * or NULL if none matches.
 */
static inline struct ev_table *
FindReusableEvT(struct ev_pointer *evp, struct ev_base *evb,
		event_t events, void *cb)
{
	struct ev_table *cur_evt = evp->evt;
	struct ev_table *walk;

	assert((evp->cb_map & cur_evt->map) == evp->cb_map);

	TAILQ_FOREACH(walk, &evb->evth, link) {
		event_t overlap = evp->cb_map & walk->map;
		assert((events & overlap) == 0);
		event_t ev_total = events | overlap;

		/* built-in events; the goto is required to escape the
		 * loop hidden inside the EVENT_FOREACH macros */
		EVENT_FOREACH_START(ev, i, BEV_OFFSET, BEV_OFFSET + NUM_BEV, ev_total) {
			if (ev & events) {
				if (walk->ent[i].cb != cb)
					goto __continue;
			} else /* if (ev & overlap) */ {
				if (walk->ent[i].cb != cur_evt->ent[i].cb)
					goto __continue;
			}
		} EVENT_FOREACH_END(ev, ev_total);

		/* ev_total consumed: nothing left to verify */
		if (!ev_total)
			return walk;

		/* user-defined events */
		EVENT_FOREACH_START(ev, i, UDE_OFFSET, MAX_EV, ev_total) {
			if (ev & events) {
				if (walk->ent[i].cb != cb)
					goto __continue;
			} else /* if (ev & overlap) */ {
				if (walk->ent[i].cb != cur_evt->ent[i].cb)
					goto __continue;
			}
		} EVENT_FOREACH_END(ev, ev_total);

		if (!ev_total)
			return walk;
__continue:
		continue;
	}

	return NULL;
}
376*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
377*76404edcSAsim Jamshed inline int
ModCb(mtcp_manager_t mtcp,int op,struct ev_pointer * evp,struct ev_base * evb,event_t events,void * cb)378*76404edcSAsim Jamshed ModCb(mtcp_manager_t mtcp, int op, struct ev_pointer *evp, struct ev_base *evb,
379*76404edcSAsim Jamshed event_t events, void *cb)
380*76404edcSAsim Jamshed {
381*76404edcSAsim Jamshed struct ev_table *evt = evp->evt;
382*76404edcSAsim Jamshed
383*76404edcSAsim Jamshed assert(evt);
384*76404edcSAsim Jamshed
385*76404edcSAsim Jamshed if (op == OP_REG) {
386*76404edcSAsim Jamshed /* NOTE: we do not register new callback if correponding 'map' is
387*76404edcSAsim Jamshed * occupied */
388*76404edcSAsim Jamshed if (events & evp->cb_map) {
389*76404edcSAsim Jamshed /* callback overwrite error */
390*76404edcSAsim Jamshed errno = EINVAL;
391*76404edcSAsim Jamshed return -1;
392*76404edcSAsim Jamshed } else if (events & evt->map) {
393*76404edcSAsim Jamshed /* event registration conflict */
394*76404edcSAsim Jamshed struct ev_table *nevt;
395*76404edcSAsim Jamshed if (!(nevt = FindReusableEvT(evp, evb, events, cb))) {
396*76404edcSAsim Jamshed nevt = MPAllocateChunk(mtcp->evt_pool);
397*76404edcSAsim Jamshed assert(nevt);
398*76404edcSAsim Jamshed TAILQ_INSERT_HEAD(&evb->evth, nevt, link);
399*76404edcSAsim Jamshed }
400*76404edcSAsim Jamshed
401*76404edcSAsim Jamshed /* register callback */
402*76404edcSAsim Jamshed if (nevt != evt)
403*76404edcSAsim Jamshed RegCbWCpy(evp, nevt, events, cb);
404*76404edcSAsim Jamshed else
405*76404edcSAsim Jamshed RegCbWoCpy(evp, events, cb);
406*76404edcSAsim Jamshed
407*76404edcSAsim Jamshed } else {
408*76404edcSAsim Jamshed /* reuse default event table */
409*76404edcSAsim Jamshed RegCbWoCpy(evp, events, cb);
410*76404edcSAsim Jamshed }
411*76404edcSAsim Jamshed } else /* if (op == OP_UNREG) */ {
412*76404edcSAsim Jamshed if ((events & evp->cb_map) != events) {
413*76404edcSAsim Jamshed /* unregister unexisting callback error */
414*76404edcSAsim Jamshed errno = EINVAL;
415*76404edcSAsim Jamshed return -1;
416*76404edcSAsim Jamshed } else {
417*76404edcSAsim Jamshed /* unregister callback */
418*76404edcSAsim Jamshed UnregCb(evp, events);
419*76404edcSAsim Jamshed }
420*76404edcSAsim Jamshed }
421*76404edcSAsim Jamshed
422*76404edcSAsim Jamshed return 0;
423*76404edcSAsim Jamshed }
424*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
425*76404edcSAsim Jamshed static inline int
ModifyCallback(mctx_t mctx,int op,int sockid,event_t events,int hook_point,void * callback)426*76404edcSAsim Jamshed ModifyCallback(mctx_t mctx, int op, int sockid, event_t events,
427*76404edcSAsim Jamshed int hook_point, void* callback)
428*76404edcSAsim Jamshed {
429*76404edcSAsim Jamshed socket_map_t socket;
430*76404edcSAsim Jamshed struct ev_pointer *evp;
431*76404edcSAsim Jamshed struct ev_base *evb;
432*76404edcSAsim Jamshed
433*76404edcSAsim Jamshed assert(op == OP_REG || op == OP_UNREG);
434*76404edcSAsim Jamshed
435*76404edcSAsim Jamshed if ((events & (g_bev_map | g_ude_map)) != events) {
436*76404edcSAsim Jamshed errno = EINVAL;
437*76404edcSAsim Jamshed return -1;
438*76404edcSAsim Jamshed }
439*76404edcSAsim Jamshed
440*76404edcSAsim Jamshed if ((op == OP_REG) && !callback)
441*76404edcSAsim Jamshed return -1;
442*76404edcSAsim Jamshed
443*76404edcSAsim Jamshed mtcp_manager_t mtcp = GetMTCPManager(mctx);
444*76404edcSAsim Jamshed if (!mtcp)
445*76404edcSAsim Jamshed return -1;
446*76404edcSAsim Jamshed
447*76404edcSAsim Jamshed socket = &mtcp->msmap[sockid];
448*76404edcSAsim Jamshed
449*76404edcSAsim Jamshed if (socket->socktype == MOS_SOCK_MONITOR_STREAM_ACTIVE) {
450*76404edcSAsim Jamshed if (hook_point == MOS_NULL) {
451*76404edcSAsim Jamshed evp = &socket->monitor_stream->dontcare_evp;
452*76404edcSAsim Jamshed evb = &socket->monitor_stream->monitor_listener->dontcare_evb;
453*76404edcSAsim Jamshed } else if (hook_point == MOS_HK_RCV) {
454*76404edcSAsim Jamshed evp = &socket->monitor_stream->pre_tcp_evp;
455*76404edcSAsim Jamshed evb = &socket->monitor_stream->monitor_listener->pre_tcp_evb;
456*76404edcSAsim Jamshed } else if (hook_point == MOS_HK_SND) {
457*76404edcSAsim Jamshed evp = &socket->monitor_stream->post_tcp_evp;
458*76404edcSAsim Jamshed evb = &socket->monitor_stream->monitor_listener->post_tcp_evb;
459*76404edcSAsim Jamshed } else
460*76404edcSAsim Jamshed return -1;
461*76404edcSAsim Jamshed
462*76404edcSAsim Jamshed } else if (socket->socktype == MOS_SOCK_MONITOR_STREAM
463*76404edcSAsim Jamshed || socket->socktype == MOS_SOCK_MONITOR_RAW) {
464*76404edcSAsim Jamshed if (hook_point == MOS_NULL)
465*76404edcSAsim Jamshed evb = &socket->monitor_listener->dontcare_evb;
466*76404edcSAsim Jamshed else if (hook_point == MOS_HK_RCV)
467*76404edcSAsim Jamshed evb = &socket->monitor_listener->pre_tcp_evb;
468*76404edcSAsim Jamshed else if (hook_point == MOS_HK_SND)
469*76404edcSAsim Jamshed evb = &socket->monitor_listener->post_tcp_evb;
470*76404edcSAsim Jamshed else
471*76404edcSAsim Jamshed return -1;
472*76404edcSAsim Jamshed
473*76404edcSAsim Jamshed evp = &evb->dflt_evp;
474*76404edcSAsim Jamshed } else {
475*76404edcSAsim Jamshed errno = EINVAL;
476*76404edcSAsim Jamshed return -1;
477*76404edcSAsim Jamshed }
478*76404edcSAsim Jamshed
479*76404edcSAsim Jamshed return ModCb(mtcp, op, evp, evb, events, callback);
480*76404edcSAsim Jamshed }
481*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
482*76404edcSAsim Jamshed int
mtcp_register_callback(mctx_t mctx,int sockid,event_t events,int hook_point,callback_t callback)483*76404edcSAsim Jamshed mtcp_register_callback(mctx_t mctx, int sockid, event_t events,
484*76404edcSAsim Jamshed int hook_point, callback_t callback)
485*76404edcSAsim Jamshed {
486*76404edcSAsim Jamshed if (!callback) {
487*76404edcSAsim Jamshed errno = EINVAL;
488*76404edcSAsim Jamshed return -1;
489*76404edcSAsim Jamshed }
490*76404edcSAsim Jamshed
491*76404edcSAsim Jamshed return ModifyCallback(mctx, OP_REG, sockid, events, hook_point, callback);
492*76404edcSAsim Jamshed }
493*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
/* Public API: detach the callbacks registered for `events` at `hook_point`
 * on `sockid`.  No callback argument is needed for unregistration. */
int
mtcp_unregister_callback(mctx_t mctx, int sockid, event_t events,
			 int hook_point)
{
	return ModifyCallback(mctx, OP_UNREG, sockid, events, hook_point, NULL);
}
500*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
501*76404edcSAsim Jamshed event_t
mtcp_define_event(event_t event,filter_t filter)502*76404edcSAsim Jamshed mtcp_define_event(event_t event, filter_t filter)
503*76404edcSAsim Jamshed {
504*76404edcSAsim Jamshed int i, j;
505*76404edcSAsim Jamshed int evid;
506*76404edcSAsim Jamshed
507*76404edcSAsim Jamshed if (!IS_PO2(event))
508*76404edcSAsim Jamshed return 0;
509*76404edcSAsim Jamshed
510*76404edcSAsim Jamshed if (!filter)
511*76404edcSAsim Jamshed return 0;
512*76404edcSAsim Jamshed
513*76404edcSAsim Jamshed for (i = 0; i < MAX_EV; i++)
514*76404edcSAsim Jamshed if (event == 1L << i) {
515*76404edcSAsim Jamshed evid = i;
516*76404edcSAsim Jamshed break;
517*76404edcSAsim Jamshed }
518*76404edcSAsim Jamshed if (i == MAX_EV)
519*76404edcSAsim Jamshed return 0;
520*76404edcSAsim Jamshed
521*76404edcSAsim Jamshed for (i = 0; i < NUM_UDE; i++) {
522*76404edcSAsim Jamshed const event_t ude = 1L << (i + UDE_OFFSET);
523*76404edcSAsim Jamshed if (g_ude_map & ude)
524*76404edcSAsim Jamshed continue;
525*76404edcSAsim Jamshed
526*76404edcSAsim Jamshed for (j = 0; j < NUM_UDE; j++)
527*76404edcSAsim Jamshed if (g_ude_id[evid][j] == -1) {
528*76404edcSAsim Jamshed g_ude_id[evid][j] = i + UDE_OFFSET;
529*76404edcSAsim Jamshed break;
530*76404edcSAsim Jamshed }
531*76404edcSAsim Jamshed if (j == NUM_UDE)
532*76404edcSAsim Jamshed return 0;
533*76404edcSAsim Jamshed
534*76404edcSAsim Jamshed /* Now we have valid UDE */
535*76404edcSAsim Jamshed
536*76404edcSAsim Jamshed /* update ancestor's descendants map */
537*76404edcSAsim Jamshed event_t ancestors = event |
538*76404edcSAsim Jamshed ((evid >= UDE_OFFSET) ? g_udes[evid - UDE_OFFSET].ancestors : 0);
539*76404edcSAsim Jamshed EVENT_FOREACH_START(ev, j, BEV_OFFSET, MAX_EV, ancestors) {
540*76404edcSAsim Jamshed g_descendants[j] |= ude;
541*76404edcSAsim Jamshed } EVENT_FOREACH_END(ev, ancestors);
542*76404edcSAsim Jamshed
543*76404edcSAsim Jamshed /* update my ancestor map */
544*76404edcSAsim Jamshed if (event & g_ude_map)
545*76404edcSAsim Jamshed g_udes[i].ancestors = event | g_udes[evid - UDE_OFFSET].ancestors;
546*76404edcSAsim Jamshed else
547*76404edcSAsim Jamshed g_udes[i].ancestors = event;
548*76404edcSAsim Jamshed
549*76404edcSAsim Jamshed g_udes[i].parent = evid;
550*76404edcSAsim Jamshed g_udes[i].ft = filter;
551*76404edcSAsim Jamshed g_ude_map |= ude;
552*76404edcSAsim Jamshed
553*76404edcSAsim Jamshed return ude;
554*76404edcSAsim Jamshed }
555*76404edcSAsim Jamshed
556*76404edcSAsim Jamshed return 0;
557*76404edcSAsim Jamshed }
558*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
/* Remove a user-defined event.
 * FIXME: this function is not implemented yet.
 * What should we do if we remove UDE while running?
 * For now it always fails with -1 after sanity-checking the argument. */
int
mtcp_remove_ude(event_t event)
{
	/* reject anything that is not a single event bit */
	if (!IS_PO2(event))
		return -1;

	/* reject events that were never defined as UDEs */
	if (!(g_ude_map & event))
		return -1;

	/* not implemented: see FIXME above */
	return -1;
}
572*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
/* Explicit-stack helpers used by the UDE filter-evaluation walk below.
 * `sp` is a pointer into a stack array of frames holding an event index,
 * a filter index, and a 64-bit event-data payload.  PUSH writes a frame
 * and advances `sp`; POP retreats `sp` and reads the frame back.
 * No bounds checking is performed -- callers size the stack. */
#define PUSH(sp, ev_idx, ft_idx, data) \
	do { \
		sp->ev_idx = ev_idx; \
		sp->ft_idx = ft_idx; \
		sp->data.u64 = data.u64; \
		sp++; \
	} while (0)
#define POP(sp, ev_idx, ft_idx, data) \
	do { \
		sp--; \
		ev_idx = sp->ev_idx; \
		ft_idx = sp->ft_idx; \
		data.u64 = sp->data.u64; \
	} while (0)
587*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
588*76404edcSAsim Jamshed /**
589*76404edcSAsim Jamshed * TODO:
590*76404edcSAsim Jamshed * - Donghwi, please change POST_TCP names to POST_SND &
591*76404edcSAsim Jamshed * PRE_TCP names to POST_RCV.
592*76404edcSAsim Jamshed *
593*76404edcSAsim Jamshed * - Please make sure that the order of event invocations is:
594*76404edcSAsim Jamshed * MOS_ON_CONN_START --> .. MOS_ON_* .. --> MOS_ON_CONN_END
595*76404edcSAsim Jamshed */
596*76404edcSAsim Jamshed inline void
HandleCallback(mtcp_manager_t mtcp,uint32_t hook,socket_map_t socket,int side,struct pkt_ctx * pctx,event_t events)597*76404edcSAsim Jamshed HandleCallback(mtcp_manager_t mtcp, uint32_t hook,
598*76404edcSAsim Jamshed socket_map_t socket, int side, struct pkt_ctx *pctx, event_t events)
599*76404edcSAsim Jamshed {
600*76404edcSAsim Jamshed struct sfbpf_program fcode;
601*76404edcSAsim Jamshed int8_t ude_id;
602*76404edcSAsim Jamshed uint64_t cb_map, ft_map;
603*76404edcSAsim Jamshed
604*76404edcSAsim Jamshed int8_t ev_idx, ft_idx;
605*76404edcSAsim Jamshed event_data_t data;
606*76404edcSAsim Jamshed
607*76404edcSAsim Jamshed if (!socket)
608*76404edcSAsim Jamshed return;
609*76404edcSAsim Jamshed
610*76404edcSAsim Jamshed if (!events)
611*76404edcSAsim Jamshed return;
612*76404edcSAsim Jamshed assert(events);
613*76404edcSAsim Jamshed
614*76404edcSAsim Jamshed /* if client side monitoring is disabled, then skip */
615*76404edcSAsim Jamshed if (side == MOS_SIDE_CLI && socket->monitor_stream->client_mon == 0)
616*76404edcSAsim Jamshed return;
617*76404edcSAsim Jamshed /* if server side monitoring is disabled, then skip */
618*76404edcSAsim Jamshed else if (side == MOS_SIDE_SVR && socket->monitor_stream->server_mon == 0)
619*76404edcSAsim Jamshed return;
620*76404edcSAsim Jamshed
621*76404edcSAsim Jamshed #define MSTRM(sock) (sock)->monitor_stream
622*76404edcSAsim Jamshed #define MLSNR(sock) (sock)->monitor_listener
623*76404edcSAsim Jamshed /* We use `?:` notation instead of `if/else` to make `evp` as const */
624*76404edcSAsim Jamshed struct ev_pointer * const evp =
625*76404edcSAsim Jamshed (socket->socktype == MOS_SOCK_MONITOR_STREAM_ACTIVE) ?
626*76404edcSAsim Jamshed ((hook == MOS_HK_RCV) ? &MSTRM(socket)->pre_tcp_evp :
627*76404edcSAsim Jamshed (hook == MOS_HK_SND) ? &MSTRM(socket)->post_tcp_evp :
628*76404edcSAsim Jamshed &MSTRM(socket)->dontcare_evp)
629*76404edcSAsim Jamshed : (socket->socktype == MOS_SOCK_MONITOR_STREAM)
630*76404edcSAsim Jamshed || (socket->socktype == MOS_SOCK_MONITOR_RAW) ?
631*76404edcSAsim Jamshed ((hook == MOS_HK_RCV) ? &MLSNR(socket)->pre_tcp_evb.dflt_evp :
632*76404edcSAsim Jamshed (hook == MOS_HK_SND) ? &MLSNR(socket)->post_tcp_evb.dflt_evp :
633*76404edcSAsim Jamshed &MLSNR(socket)->dontcare_evb.dflt_evp) :
634*76404edcSAsim Jamshed NULL;
635*76404edcSAsim Jamshed
636*76404edcSAsim Jamshed if (!evp || !((cb_map = events & evp->cb_map) || (g_ude_map & evp->cb_map)))
637*76404edcSAsim Jamshed return;
638*76404edcSAsim Jamshed
639*76404edcSAsim Jamshed /* mtcp_bind_monitor_filter()
640*76404edcSAsim Jamshed * - BPF filter is evaluated only for RAW socket and PASSIVE socket (= orphan filter)
641*76404edcSAsim Jamshed * - stream syn filter is moved to and evaluated on socket creation */
642*76404edcSAsim Jamshed if (socket->socktype == MOS_SOCK_MONITOR_STREAM) {
643*76404edcSAsim Jamshed fcode = MLSNR(socket)->stream_orphan_fcode;
644*76404edcSAsim Jamshed /* if not match with filter, return */
645*76404edcSAsim Jamshed if (ISSET_BPFFILTER(fcode) && pctx && EVAL_BPFFILTER(fcode,
646*76404edcSAsim Jamshed (uint8_t *)pctx->p.iph - sizeof(struct ethhdr),
647*76404edcSAsim Jamshed pctx->p.ip_len + sizeof(struct ethhdr)) == 0)
648*76404edcSAsim Jamshed return;
649*76404edcSAsim Jamshed }
650*76404edcSAsim Jamshed if (socket->socktype == MOS_SOCK_MONITOR_RAW) {
651*76404edcSAsim Jamshed fcode = MLSNR(socket)->raw_pkt_fcode;
652*76404edcSAsim Jamshed /* if not match with filter, return */
653*76404edcSAsim Jamshed if (ISSET_BPFFILTER(fcode) && pctx && EVAL_BPFFILTER(fcode,
654*76404edcSAsim Jamshed (uint8_t *)pctx->p.iph - sizeof(struct ethhdr),
655*76404edcSAsim Jamshed pctx->p.ip_len + sizeof(struct ethhdr)) == 0)
656*76404edcSAsim Jamshed return;
657*76404edcSAsim Jamshed }
658*76404edcSAsim Jamshed
659*76404edcSAsim Jamshed ft_map = events & evp->ft_map;
660*76404edcSAsim Jamshed
661*76404edcSAsim Jamshed event_t bev_map = cb_map | ft_map;
662*76404edcSAsim Jamshed struct ev_table * const evt = evp->evt;
663*76404edcSAsim Jamshed
664*76404edcSAsim Jamshed struct {
665*76404edcSAsim Jamshed int8_t ev_idx;
666*76404edcSAsim Jamshed int8_t ft_idx;
667*76404edcSAsim Jamshed event_data_t data;
668*76404edcSAsim Jamshed } stack[NUM_UDE + 1], *sp = stack;
669*76404edcSAsim Jamshed
670*76404edcSAsim Jamshed mtcp->pctx = pctx; /* for mtcp_getlastpkt() */
671*76404edcSAsim Jamshed mctx_t const mctx = g_ctx[mtcp->ctx->cpu];
672*76404edcSAsim Jamshed
673*76404edcSAsim Jamshed EVENT_FOREACH_START(bev, bidx, BEV_OFFSET, BEV_OFFSET + NUM_BEV, bev_map) {
674*76404edcSAsim Jamshed ev_idx = bidx;
675*76404edcSAsim Jamshed ft_idx = 0;
676*76404edcSAsim Jamshed data.u64 = 0;
677*76404edcSAsim Jamshed const event_t descendants = g_descendants[ev_idx];
678*76404edcSAsim Jamshed if (descendants) {
679*76404edcSAsim Jamshed cb_map |= descendants & evp->cb_map;
680*76404edcSAsim Jamshed ft_map |= descendants & evp->ft_map;
681*76404edcSAsim Jamshed }
682*76404edcSAsim Jamshed
683*76404edcSAsim Jamshed while (1) {
684*76404edcSAsim Jamshed const uint64_t ev = (1L << ev_idx);
685*76404edcSAsim Jamshed
686*76404edcSAsim Jamshed if (cb_map & ev) {
687*76404edcSAsim Jamshed /* call callback */
688*76404edcSAsim Jamshed evt->ent[ev_idx].cb(mctx, socket->id, side, ev, data);
689*76404edcSAsim Jamshed
690*76404edcSAsim Jamshed if (!(cb_map &= ~ev))
691*76404edcSAsim Jamshed return;
692*76404edcSAsim Jamshed }
693*76404edcSAsim Jamshed
694*76404edcSAsim Jamshed while (1) {
695*76404edcSAsim Jamshed event_data_t tmpdata;
696*76404edcSAsim Jamshed if (ft_idx >= NUM_UDE
697*76404edcSAsim Jamshed || (ude_id = g_ude_id[ev_idx][ft_idx]) < 0) {
698*76404edcSAsim Jamshed /* done with this event */
699*76404edcSAsim Jamshed if (sp == stack /* stack is empty */) {
700*76404edcSAsim Jamshed /* go to next built-in event */
701*76404edcSAsim Jamshed goto __continue;
702*76404edcSAsim Jamshed } else {
703*76404edcSAsim Jamshed POP(sp, ev_idx, ft_idx, data);
704*76404edcSAsim Jamshed ft_idx++;
705*76404edcSAsim Jamshed }
706*76404edcSAsim Jamshed break;
707*76404edcSAsim Jamshed }
708*76404edcSAsim Jamshed
709*76404edcSAsim Jamshed assert(ude_id >= UDE_OFFSET && ude_id < MAX_EV);
710*76404edcSAsim Jamshed
711*76404edcSAsim Jamshed if (((1L << ude_id) & (cb_map | ft_map)) &&
712*76404edcSAsim Jamshed (tmpdata.u64 = g_udes[ude_id - UDE_OFFSET].ft(mctx, socket->id, side, ev, data))) {
713*76404edcSAsim Jamshed /* DFS jump */
714*76404edcSAsim Jamshed PUSH(sp, ev_idx, ft_idx, data);
715*76404edcSAsim Jamshed ev_idx = ude_id;
716*76404edcSAsim Jamshed ft_idx = 0;
717*76404edcSAsim Jamshed data.u64 = tmpdata.u64;
718*76404edcSAsim Jamshed break;
719*76404edcSAsim Jamshed }
720*76404edcSAsim Jamshed
721*76404edcSAsim Jamshed ft_idx++;
722*76404edcSAsim Jamshed }
723*76404edcSAsim Jamshed }
724*76404edcSAsim Jamshed }
725*76404edcSAsim Jamshed __continue:
726*76404edcSAsim Jamshed EVENT_FOREACH_END(bev, bev_map);
727*76404edcSAsim Jamshed }
728*76404edcSAsim Jamshed /*----------------------------------------------------------------------------*/
729*76404edcSAsim Jamshed
730*76404edcSAsim Jamshed #endif
731