72 |
72 |
73 if (ret < 0) { |
73 if (ret < 0) { |
74 // EAGAIN -> zero bytes |
74 // EAGAIN -> zero bytes |
75 *len = 0; |
75 *len = 0; |
76 |
76 |
|
77 if (transport->info.ev_mask & TRANSPORT_WRITE) |
|
78 // enable the write event |
|
79 if ((ERROR_CODE(err) = transport_fd_enable(fd, TRANSPORT_WRITE))) |
|
80 return ERROR_CODE(err); |
|
81 |
77 } else { |
82 } else { |
78 // normal -> bytes read |
83 // normal -> bytes read |
79 *len = ret; |
84 *len = ret; |
80 } |
85 } |
81 |
86 |
82 return SUCCESS; |
87 return SUCCESS; |
83 } |
88 } |
84 |
89 |
|
90 err_t transport_fd_methods_events (transport_t *transport, short mask, error_t *err) |
|
91 { |
|
92 struct transport_fd *fd = transport_check(transport, &transport_fd_type); |
|
93 |
|
94 short _mask = 0; |
|
95 |
|
96 // enable read as requested |
|
97 if (mask & TRANSPORT_READ) |
|
98 _mask |= TRANSPORT_READ; |
|
99 |
|
100 // enable write if requested and it's currently enabled |
|
101 if ((mask & TRANSPORT_WRITE) && event_pending(fd->ev_write, EV_WRITE, NULL)) |
|
102 _mask |= TRANSPORT_WRITE; |
|
103 |
|
104 // set |
|
105 return (ERROR_CODE(err) = transport_fd_events(fd, mask)); |
|
106 } |
|
107 |
85 void _transport_fd_destroy (transport_t *transport) |
108 void _transport_fd_destroy (transport_t *transport) |
86 { |
109 { |
87 |
|
88 struct transport_fd *fd = transport_check(transport, &transport_fd_type); |
110 struct transport_fd *fd = transport_check(transport, &transport_fd_type); |
89 |
111 |
90 transport_fd_destroy(fd); |
112 transport_fd_destroy(fd); |
91 } |
113 } |
92 |
114 |
/*
 * Method table binding the generic transport interface to the fd-based
 * implementations above.
 */
const struct transport_methods transport_fd_methods = {
    .read = transport_fd_methods_read,
    .write = transport_fd_methods_write,
    .events = transport_fd_methods_events,
    .destroy = _transport_fd_destroy
};
|
121 |
|
/**
 * Dummy callbacks
 */
void transport_fd_callback_user (struct transport_fd *fd, short what, void *arg)
{
    // the user argument is not needed by this trivial proxy
    (void) arg;

    // forward the event mask straight to the generic transport layer
    transport_invoke(TRANSPORT_FD_BASE(fd), what);
}
98 |
132 |
99 /** |
133 /** |
100 * Function implementations |
134 * Function implementations |
101 */ |
135 */ |
102 void transport_fd_init (struct transport_fd *fd, struct event_base *ev_base, int _fd) |
136 void transport_fd_init (struct transport_fd *fd, struct event_base *ev_base, int _fd) |
137 { |
171 { |
138 assert(fd->fd != TRANSPORT_FD_INVALID); |
172 assert(fd->fd != TRANSPORT_FD_INVALID); |
139 assert(!fd->ev_read && !fd->ev_write); |
173 assert(!fd->ev_read && !fd->ev_write); |
140 |
174 |
141 // create new events |
175 // create new events |
142 if ((fd->ev_read = event_new(fd->ev_base, fd->fd, EV_READ, transport_fd_on_event, fd)) == NULL) |
176 if ((fd->ev_read = event_new(fd->ev_base, fd->fd, EV_READ | EV_PERSIST, transport_fd_on_event, fd)) == NULL) |
143 goto err_event_add; |
177 goto err_event_add; |
144 |
178 |
145 if ((fd->ev_write = event_new(fd->ev_base, fd->fd, EV_WRITE, transport_fd_on_event, fd)) == NULL) |
179 if ((fd->ev_write = event_new(fd->ev_base, fd->fd, EV_WRITE, transport_fd_on_event, fd)) == NULL) |
146 goto err_event_add; |
180 goto err_event_add; |
147 |
181 |
164 fd->cb_func = cb_func; |
198 fd->cb_func = cb_func; |
165 fd->cb_arg = cb_arg; |
199 fd->cb_arg = cb_arg; |
166 |
200 |
167 // install the event handlers? |
201 // install the event handlers? |
168 if (!fd->ev_read || !fd->ev_write) |
202 if (!fd->ev_read || !fd->ev_write) |
169 transport_fd_install(fd); |
203 return transport_fd_install(fd); |
170 |
204 else |
171 |
205 return SUCCESS; |
172 return SUCCESS; |
|
173 } |
206 } |
174 |
207 |
175 err_t transport_fd_enable (struct transport_fd *fd, short mask) |
208 err_t transport_fd_enable (struct transport_fd *fd, short mask) |
176 { |
209 { |
177 // just add the appropraite events |
210 // just add the appropriate events |
178 if (mask & EV_READ && event_add(fd->ev_read, NULL)) |
211 if (mask & TRANSPORT_READ && event_add(fd->ev_read, NULL)) |
179 return ERR_EVENT_ADD; |
212 return ERR_EVENT_ADD; |
180 |
213 |
181 if (mask & EV_WRITE && event_add(fd->ev_write, NULL)) |
214 if (mask & TRANSPORT_WRITE && event_add(fd->ev_write, NULL)) |
182 return ERR_EVENT_ADD; |
215 return ERR_EVENT_ADD; |
183 |
216 |
184 |
217 |
|
218 return SUCCESS; |
|
219 } |
|
220 |
|
221 err_t transport_fd_disable (struct transport_fd *fd, short mask) |
|
222 { |
|
223 if (mask & TRANSPORT_READ && event_del(fd->ev_read)) |
|
224 return ERR_EVENT_DEL; |
|
225 |
|
226 if (mask & TRANSPORT_WRITE && event_del(fd->ev_write)) |
|
227 return ERR_EVENT_DEL; |
|
228 |
|
229 |
|
230 return SUCCESS; |
|
231 } |
|
232 |
|
233 err_t transport_fd_events (struct transport_fd *fd, short mask) |
|
234 { |
|
235 err_t err; |
|
236 |
|
237 // enable/disable read |
|
238 if (mask & TRANSPORT_READ) |
|
239 err = event_add(fd->ev_read, NULL); |
|
240 else |
|
241 err = event_del(fd->ev_read); |
|
242 |
|
243 if (err) |
|
244 return err; |
|
245 |
|
246 // enable/disable write |
|
247 if (mask & TRANSPORT_WRITE) |
|
248 err = event_add(fd->ev_write, NULL); |
|
249 else |
|
250 err = event_del(fd->ev_write); |
|
251 |
|
252 if (err) |
|
253 return err; |
|
254 |
|
255 // ok |
185 return SUCCESS; |
256 return SUCCESS; |
186 } |
257 } |
187 |
258 |
188 /** |
259 /** |
189 * Remove our current ev_* events, but leave the cb_* intact. |
260 * Remove our current ev_* events, but leave the cb_* intact. |