solidc
Robust collection of general-purpose cross-platform C libraries and data structures designed for rapid and safe development in C

epoll.h
1#ifndef SOLIDC_EPOLL_H
2#define SOLIDC_EPOLL_H
3
12#include <errno.h>
13#include <stddef.h>
14#include <stdint.h>
15#include <stdlib.h>
16
17/* =========================================================================
18 * LINUX IMPLEMENTATION (Native)
19 * ========================================================================= */
20#if defined(__linux__)
21
22#include <sys/epoll.h>
23
24/* =========================================================================
25 * BSD / MACOS IMPLEMENTATION (Kqueue Wrapper)
26 * ========================================================================= */
27#elif defined(__FreeBSD__) || defined(__APPLE__) || defined(__OpenBSD__) || defined(__NetBSD__)
28
29#include <string.h>
30#include <sys/event.h>
31#include <sys/time.h>
32#include <sys/types.h>
33#include <unistd.h>
34
35/* -------------------------------------------------------------------------
36 * Data Types and Structs (Matching Linux ABI)
37 * ------------------------------------------------------------------------- */
38
/* User-data payload attached to each registered event. Mirrors the Linux
 * epoll_data union exactly: only one member is meaningful at a time, chosen
 * by the caller. This shim stores/returns it through kevent's udata field. */
typedef union epoll_data {
    void* ptr;
    int fd;
    uint32_t u32;
    uint64_t u64;
} epoll_data_t;

/* Mirrors the Linux struct epoll_event layout. Packed because the Linux
 * x86-64 ABI packs this struct (4-byte events immediately followed by the
 * 8-byte data union, no padding), keeping binary layout identical for code
 * compiled against either definition. */
struct epoll_event {
    uint32_t events;   /* Bitmask of EPOLL* event flags */
    epoll_data_t data; /* Opaque user data handed back by epoll_wait() */
} __attribute__((packed));
50
51/* -------------------------------------------------------------------------
52 * Flags (Mimicking Linux constants)
53 * ------------------------------------------------------------------------- */
54
/* Event flag values deliberately chosen to match the Linux <sys/epoll.h>
 * constants, so user code sees identical bitmask values on every platform. */
#define EPOLLIN 0x001
#define EPOLLPRI 0x002
#define EPOLLOUT 0x004
#define EPOLLERR 0x008
#define EPOLLHUP 0x010
#define EPOLLRDHUP 0x2000
#define EPOLLEXCLUSIVE 0x10000000 /* Use proper flag for kqueue */
#define EPOLLET (1U << 31) /* Using high bit for Edge Triggered */
#define EPOLLONESHOT (1U << 30)

/* Op codes for epoll_ctl (same numeric values as Linux) */
#define EPOLL_CTL_ADD 1
#define EPOLL_CTL_DEL 2
#define EPOLL_CTL_MOD 3
69
70/* -------------------------------------------------------------------------
71 * Polyfill Functions
72 * ------------------------------------------------------------------------- */
73
/**
 * Emulates Linux epoll_create1() on top of kqueue.
 *
 * @param flags Ignored; EPOLL_CLOEXEC has no direct kqueue equivalent here.
 * @return A new kqueue descriptor, or -1 with errno set by kqueue().
 */
static inline int epoll_create1(int flags) {
    (void)flags;
    int kq = kqueue();
    return kq;
}
82
/**
 * Emulates the legacy Linux epoll_create() on top of kqueue.
 *
 * @param size Ignored; it is only a historical hint on Linux as well.
 * @return A new kqueue descriptor, or -1 with errno set by kqueue().
 */
static inline int epoll_create(int size) {
    (void)size;
    int kq = kqueue();
    return kq;
}
90
/**
 * Emulates Linux epoll_ctl() using kqueue filters.
 *
 * Mapping: EPOLLIN/EPOLLPRI/EPOLLRDHUP -> EVFILT_READ,
 *          EPOLLOUT                    -> EVFILT_WRITE,
 *          EPOLLET -> EV_CLEAR, EPOLLONESHOT -> EV_ONESHOT.
 *
 * @param epfd  kqueue descriptor from epoll_create()/epoll_create1().
 * @param fd    File descriptor to register/modify/remove.
 * @param op    EPOLL_CTL_ADD, EPOLL_CTL_MOD or EPOLL_CTL_DEL.
 * @param event Event description; may be NULL only for EPOLL_CTL_DEL.
 * @return 0 on success, -1 on error with errno set (EINVAL for a bad op,
 *         EFAULT for NULL event on ADD/MOD, ENOENT when DEL finds nothing).
 */
static inline int epoll_ctl(int epfd, int op, int fd, struct epoll_event* event) {
    /*
     * DEL: remove both possible filters, one kevent() call each.
     * Linux epoll_ctl(DEL) succeeds whenever the fd was registered at all,
     * regardless of which event mask it was registered with. A single
     * batched kevent() call would fail with ENOENT if either filter is
     * absent, so delete them individually and require only that at least
     * one of the two actually existed.
     */
    if (op == EPOLL_CTL_DEL) {
        struct kevent del_ev;
        int removed = 0;

        EV_SET(&del_ev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
        if (kevent(epfd, &del_ev, 1, NULL, 0, NULL) == 0) removed = 1;

        EV_SET(&del_ev, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
        if (kevent(epfd, &del_ev, 1, NULL, 0, NULL) == 0) removed = 1;

        if (!removed) {
            errno = ENOENT; /* fd was not registered with this instance */
            return -1;
        }
        return 0;
    }

    if (op != EPOLL_CTL_ADD && op != EPOLL_CTL_MOD) {
        errno = EINVAL;
        return -1;
    }
    /* Linux returns EFAULT for a NULL event pointer on ADD/MOD; the old
     * code silently registered nothing. */
    if (event == NULL) {
        errno = EFAULT;
        return -1;
    }

    /* EV_ADD both creates and modifies a filter in kqueue, so ADD and MOD
     * share the same change flags. */
    uint16_t k_flags = EV_ADD | EV_ENABLE;
    if (event->events & EPOLLET) {
        k_flags |= EV_CLEAR; /* edge-triggered semantics */
    }
    if (event->events & EPOLLONESHOT) {
        k_flags |= EV_ONESHOT;
    }

    struct kevent kev[2];
    int nchanges = 0;

    /* READ side: kqueue doesn't distinguish PRI/RDHUP exactly like epoll;
     * all of them map to the standard READ filter. */
    if (event->events & (EPOLLIN | EPOLLPRI | EPOLLRDHUP)) {
        EV_SET(&kev[nchanges++], fd, EVFILT_READ, k_flags, 0, 0, event->data.ptr);
    }

    /* WRITE side */
    if (event->events & EPOLLOUT) {
        EV_SET(&kev[nchanges++], fd, EVFILT_WRITE, k_flags, 0, 0, event->data.ptr);
    }

    /*
     * MOD must also REMOVE filters the new mask no longer requests
     * (e.g. IN|OUT -> IN). EV_ADD alone never deletes a filter, so delete
     * the unrequested direction explicitly, ignoring the error if it was
     * never registered. Note the READ check uses the same mask as the add
     * path above (including EPOLLRDHUP) so the two stay consistent.
     */
    if (op == EPOLL_CTL_MOD) {
        struct kevent del_ev;
        if (!(event->events & EPOLLOUT)) {
            EV_SET(&del_ev, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
            (void)kevent(epfd, &del_ev, 1, NULL, 0, NULL); /* ignore ENOENT */
        }
        if (!(event->events & (EPOLLIN | EPOLLPRI | EPOLLRDHUP))) {
            EV_SET(&del_ev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
            (void)kevent(epfd, &del_ev, 1, NULL, 0, NULL); /* ignore ENOENT */
        }
    }

    if (nchanges == 0) return 0;

    return kevent(epfd, kev, nchanges, NULL, 0, NULL);
}
165
/**
 * Emulates Linux epoll_wait() on top of kevent().
 *
 * Limitation: a descriptor registered for both READ and WRITE may produce
 * two separate entries in @p events (one per kqueue filter), whereas Linux
 * merges them into a single entry with both bits set.
 *
 * @param epfd       kqueue descriptor.
 * @param events     Output array of at least @p maxevents entries.
 * @param maxevents  Must be > 0 (EINVAL otherwise, matching Linux).
 * @param timeout_ms Milliseconds to wait; negative blocks indefinitely,
 *                   0 polls without blocking.
 * @return Number of ready events, 0 on timeout, -1 on error with errno set.
 */
static inline int epoll_wait(int epfd, struct epoll_event* events, int maxevents, int timeout_ms) {
    /* Linux rejects maxevents <= 0 with EINVAL; this also guards the
     * allocation below against a negative (hence enormous) size. */
    if (maxevents <= 0) {
        errno = EINVAL;
        return -1;
    }

    struct timespec ts;
    struct timespec* ts_ptr = NULL; /* NULL => block indefinitely */

    if (timeout_ms >= 0) {
        ts.tv_sec = timeout_ms / 1000;
        ts.tv_nsec = (timeout_ms % 1000) * 1000000L;
        ts_ptr = &ts;
    }

    /*
     * We cannot cast (struct epoll_event*) to (struct kevent*) in-place
     * because the structures have different sizes (kevent is usually larger).
     * Collect results in a temporary buffer and translate them.
     */
    struct kevent* k_events = malloc(sizeof *k_events * (size_t)maxevents);
    if (!k_events) {
        errno = ENOMEM;
        return -1;
    }

    int nfds = kevent(epfd, NULL, 0, k_events, maxevents, ts_ptr);

    for (int i = 0; i < nfds; ++i) {
        uint32_t ev_flags = 0;

        if (k_events[i].filter == EVFILT_READ) {
            ev_flags |= EPOLLIN;
            if (k_events[i].flags & EV_EOF) {
                ev_flags |= EPOLLRDHUP; /* peer closed its write side */
            }
        } else if (k_events[i].filter == EVFILT_WRITE) {
            ev_flags |= EPOLLOUT;
        }

        if (k_events[i].flags & EV_ERROR) {
            ev_flags |= EPOLLERR;
        }
        if (k_events[i].flags & EV_EOF) {
            ev_flags |= EPOLLHUP;
        }

        events[i].events = ev_flags;
        /* NOTE(review): udata is void* on FreeBSD/macOS; on older NetBSD it
         * is intptr_t and may need an explicit cast — confirm if targeting
         * NetBSD. */
        events[i].data.ptr = k_events[i].udata;
    }

    free(k_events);
    return nfds;
}
221
222#else
223#error "Unsupported platform: This header supports Linux (native) and BSD/macOS (kqueue shim)."
224#endif
225
226#include <arpa/inet.h> // for socket, bind, listen, accept
227#include <errno.h> // for errno
228#include <fcntl.h> // for fcntl, F_GETFL, F_SETFL
229#include <netinet/in.h> // for sockaddr_in, INADDR_ANY
230#include <stdbool.h>
231#include <stdio.h>
232#include <sys/socket.h> // for socket, SOL_SOCKET, SO_REUSEADDR
233#include <unistd.h> // for close, read, write
234
/**
 * Switches @p fd into non-blocking mode, preserving its other status flags.
 *
 * @param fd An open file descriptor.
 * @return The result of fcntl(F_SETFL) on success (not -1), or -1 with
 *         errno set if either fcntl call fails.
 */
static inline int set_nonblocking(int fd) {
    const int current = fcntl(fd, F_GETFL, 0);
    return (current == -1) ? -1 : fcntl(fd, F_SETFL, current | O_NONBLOCK);
}
245
250static inline int create_listen_socket(uint16_t port) {
251 int fd = socket(AF_INET, SOCK_STREAM, 0);
252 if (fd == -1) {
253 perror("socket");
254 return -1;
255 }
256
257 // Set SO_REUSEPORT: This is critical for load balancing
258 int opt = 1;
259 if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) == -1) {
260 perror("setsockopt SO_REUSEPORT");
261 close(fd);
262 return -1;
263 }
264
265 // Also set SO_REUSEADDR usually to allow restart during TIME_WAIT
266 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) == -1) {
267 perror("setsockopt SO_REUSEADDR");
268 close(fd);
269 return -1;
270 }
271
272 struct sockaddr_in addr = {0};
273 addr.sin_family = AF_INET;
274 addr.sin_addr.s_addr = INADDR_ANY;
275 addr.sin_port = htons(port);
276
277 if (bind(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) {
278 perror("bind");
279 close(fd);
280 return -1;
281 }
282
283 if (listen(fd, SOMAXCONN) == -1) {
284 perror("listen");
285 close(fd);
286 return -1;
287 }
288
289 if (set_nonblocking(fd) == -1) {
290 perror("set_nonblocking");
291 close(fd);
292 return -1;
293 }
294
295 return fd;
296}
297
298#endif /* SOLIDC_EPOLL_H */