// hloop.c
#include "hloop.h"
#include "hevent.h"
#include "iowatcher.h"

#include "hdef.h"
#include "hbase.h"
#include "hlog.h"
#include "hmath.h"
#include "htime.h"
#include "hsocket.h"
#include "hthread.h"

#define HLOOP_PAUSE_TIME        10      // ms
#define HLOOP_MAX_BLOCK_TIME    100     // ms
#define HLOOP_STAT_TIMEOUT      60000   // ms

#define IO_ARRAY_INIT_SIZE              1024
#define CUSTOM_EVENT_QUEUE_INIT_SIZE    16

#define SOCKPAIR_WRITE_INDEX    0
#define SOCKPAIR_READ_INDEX     1

static void __hidle_del(hidle_t* idle);
static void __htimer_del(htimer_t* timer);

static int timers_compare(const struct heap_node* lhs, const struct heap_node* rhs) {
    return TIMER_ENTRY(lhs)->next_timeout < TIMER_ENTRY(rhs)->next_timeout;
}
static int hloop_process_idles(hloop_t* loop) {
    int nidles = 0;
    struct list_node* node = loop->idles.next;
    hidle_t* idle = NULL;
    while (node != &loop->idles) {
        idle = IDLE_ENTRY(node);
        node = node->next;
        if (idle->repeat != INFINITE) {
            --idle->repeat;
        }
        if (idle->repeat == 0) {
            // NOTE: Just mark it as destroy and remove from list.
            // Real deletion occurs after hloop_process_pendings.
            __hidle_del(idle);
        }
        EVENT_PENDING(idle);
        ++nidles;
    }
    return nidles;
}
static int hloop_process_timers(hloop_t* loop) {
    int ntimers = 0;
    htimer_t* timer = NULL;
    uint64_t now_hrtime = hloop_now_hrtime(loop);
    while (loop->timers.root) {
        // NOTE: root of minheap has min timeout.
        timer = TIMER_ENTRY(loop->timers.root);
        if (timer->next_timeout > now_hrtime) {
            break;
        }
        if (timer->repeat != INFINITE) {
            --timer->repeat;
        }
        if (timer->repeat == 0) {
            // NOTE: Just mark it as destroy and remove from heap.
            // Real deletion occurs after hloop_process_pendings.
            __htimer_del(timer);
        }
        else {
            // NOTE: calc next timeout, then re-insert into the heap.
            heap_dequeue(&loop->timers);
            if (timer->event_type == HEVENT_TYPE_TIMEOUT) {
                while (timer->next_timeout <= now_hrtime) {
                    timer->next_timeout += (uint64_t)((htimeout_t*)timer)->timeout * 1000;
                }
            }
            else if (timer->event_type == HEVENT_TYPE_PERIOD) {
                hperiod_t* period = (hperiod_t*)timer;
                timer->next_timeout = (uint64_t)cron_next_timeout(period->minute, period->hour, period->day,
                        period->week, period->month) * 1000000;
            }
            heap_insert(&loop->timers, &timer->node);
        }
        EVENT_PENDING(timer);
        ++ntimers;
    }
    return ntimers;
}
static int hloop_process_ios(hloop_t* loop, int timeout) {
    // NOTE: this calls an IO multiplexing function such as select, poll, epoll, etc.
    int nevents = iowatcher_poll_events(loop, timeout);
    if (nevents < 0) {
        hlogd("poll_events error=%d", -nevents);
    }
    return nevents < 0 ? 0 : nevents;
}
static int hloop_process_pendings(hloop_t* loop) {
    if (loop->npendings == 0) return 0;

    hevent_t* cur = NULL;
    hevent_t* next = NULL;
    int ncbs = 0;
    // NOTE: invoke event callbacks in order from high priority to low.
    for (int i = HEVENT_PRIORITY_SIZE-1; i >= 0; --i) {
        cur = loop->pendings[i];
        while (cur) {
            next = cur->pending_next;
            if (cur->pending) {
                if (cur->active && cur->cb) {
                    cur->cb(cur);
                    ++ncbs;
                }
                cur->pending = 0;
                // NOTE: now we can safely delete events marked as destroy.
                if (cur->destroy) {
                    EVENT_DEL(cur);
                }
            }
            cur = next;
        }
        loop->pendings[i] = NULL;
    }
    loop->npendings = 0;
    return ncbs;
}
// hloop_process_ios -> hloop_process_timers -> hloop_process_idles -> hloop_process_pendings
static int hloop_process_events(hloop_t* loop) {
    // ios -> timers -> idles
    int nios, ntimers, nidles;
    nios = ntimers = nidles = 0;

    // calc blocktime
    int32_t blocktime = HLOOP_MAX_BLOCK_TIME;
    if (loop->timers.root) {
        hloop_update_time(loop);
        uint64_t next_min_timeout = TIMER_ENTRY(loop->timers.root)->next_timeout;
        int64_t blocktime_us = next_min_timeout - hloop_now_hrtime(loop);
        if (blocktime_us <= 0) goto process_timers;
        blocktime = blocktime_us / 1000;
        ++blocktime;
        blocktime = MIN(blocktime, HLOOP_MAX_BLOCK_TIME);
    }

    if (loop->nios) {
        nios = hloop_process_ios(loop, blocktime);
    } else {
        hv_msleep(blocktime);
    }
    hloop_update_time(loop);
    // wakeup by hloop_stop
    if (loop->status == HLOOP_STATUS_STOP) {
        return 0;
    }

process_timers:
    if (loop->ntimers) {
        ntimers = hloop_process_timers(loop);
    }

    int npendings = loop->npendings;
    if (npendings == 0) {
        if (loop->nidles) {
            nidles = hloop_process_idles(loop);
        }
    }
    int ncbs = hloop_process_pendings(loop);
    // printd("blocktime=%d nios=%d/%u ntimers=%d/%u nidles=%d/%u nactives=%d npendings=%d ncbs=%d\n",
    //         blocktime, nios, loop->nios, ntimers, loop->ntimers, nidles, loop->nidles,
    //         loop->nactives, npendings, ncbs);
    return ncbs;
}
static void hloop_stat_timer_cb(htimer_t* timer) {
    hloop_t* loop = timer->loop;
    // hlog_set_level(LOG_LEVEL_DEBUG);
    hlogd("[loop] pid=%ld tid=%ld uptime=%lluus cnt=%llu nactives=%u nios=%u ntimers=%u nidles=%u",
        loop->pid, loop->tid, loop->cur_hrtime - loop->start_hrtime, loop->loop_cnt,
        loop->nactives, loop->nios, loop->ntimers, loop->nidles);
}
static void sockpair_read_cb(hio_t* io, void* buf, int readbytes) {
    hloop_t* loop = io->loop;
    hevent_t* pev = NULL;
    hevent_t ev;
    for (int i = 0; i < readbytes; ++i) {
        hmutex_lock(&loop->custom_events_mutex);
        if (event_queue_empty(&loop->custom_events)) {
            goto unlock;
        }
        pev = event_queue_front(&loop->custom_events);
        if (pev == NULL) {
            goto unlock;
        }
        ev = *pev;
        event_queue_pop_front(&loop->custom_events);
        // NOTE: unlock before the callback, to avoid deadlock if hloop_post_event is called inside it.
        hmutex_unlock(&loop->custom_events_mutex);
        if (ev.cb) {
            ev.cb(&ev);
        }
    }
    return;
unlock:
    hmutex_unlock(&loop->custom_events_mutex);
}
static int hloop_create_sockpair(hloop_t* loop) {
    if (Socketpair(AF_INET, SOCK_STREAM, 0, loop->sockpair) != 0) {
        hloge("socketpair create failed!");
        return -1;
    }
    hio_t* io = hread(loop, loop->sockpair[SOCKPAIR_READ_INDEX], loop->readbuf.base, loop->readbuf.len, sockpair_read_cb);
    io->priority = HEVENT_HIGH_PRIORITY;
    // NOTE: clear the stored read fd to avoid a duplicate closesocket in hio_cleanup.
    loop->sockpair[SOCKPAIR_READ_INDEX] = -1;
    ++loop->intern_nevents;
    return 0;
}

static void hloop_destroy_sockpair(hloop_t* loop) {
    SAFE_CLOSESOCKET(loop->sockpair[SOCKPAIR_READ_INDEX]);
    SAFE_CLOSESOCKET(loop->sockpair[SOCKPAIR_WRITE_INDEX]);
}
void hloop_post_event(hloop_t* loop, hevent_t* ev) {
    if (ev->loop == NULL) {
        ev->loop = loop;
    }
    if (ev->event_type == 0) {
        ev->event_type = HEVENT_TYPE_CUSTOM;
    }
    if (ev->event_id == 0) {
        ev->event_id = hloop_next_event_id();
    }

    int nsend = 0;
    hmutex_lock(&loop->custom_events_mutex);
    if (loop->sockpair[SOCKPAIR_WRITE_INDEX] == -1) {
        if (hloop_create_sockpair(loop) != 0) {
            goto unlock;
        }
    }
    nsend = send(loop->sockpair[SOCKPAIR_WRITE_INDEX], "e", 1, 0);
    if (nsend != 1) {
        hloge("send failed!");
        goto unlock;
    }
    event_queue_push_back(&loop->custom_events, ev);
unlock:
    hmutex_unlock(&loop->custom_events_mutex);
}
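
// Usage sketch (not part of this file): hloop_post_event is the thread-safe way to hand work to a
// loop running on another thread. The event struct is copied into the queue (see sockpair_read_cb),
// so a stack-allocated hevent_t is fine; only userdata must outlive the callback. on_custom and
// some_ptr below are hypothetical names used for illustration.
/*
    static void on_custom(hevent_t* ev) {
        printf("custom event %llu, userdata=%p\n", (unsigned long long)ev->event_id, ev->userdata);
    }

    // from any thread:
    hevent_t ev;
    memset(&ev, 0, sizeof(ev));
    ev.cb = on_custom;
    ev.userdata = some_ptr; // must stay valid until on_custom runs in the loop thread
    hloop_post_event(loop, &ev);
*/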
static void hloop_init(hloop_t* loop) {
#ifdef OS_WIN
    WSAInit();
#endif
#ifdef SIGPIPE
    // NOTE: if SIGPIPE is not ignored, a second write after the peer has closed will terminate the process with SIGPIPE.
    signal(SIGPIPE, SIG_IGN);
#endif

    loop->status = HLOOP_STATUS_STOP;
    loop->pid = hv_getpid();
    loop->tid = hv_gettid();

    // idles
    list_init(&loop->idles);

    // timers
    heap_init(&loop->timers, timers_compare);

    // ios
    io_array_init(&loop->ios, IO_ARRAY_INIT_SIZE);

    // readbuf
    loop->readbuf.len = HLOOP_READ_BUFSIZE;
    HV_ALLOC(loop->readbuf.base, loop->readbuf.len);

    // iowatcher
    iowatcher_init(loop);

    // custom_events
    hmutex_init(&loop->custom_events_mutex);
    event_queue_init(&loop->custom_events, CUSTOM_EVENT_QUEUE_INIT_SIZE);
    // NOTE: the sockpair is created lazily, in hloop_post_event or hloop_run.
    loop->sockpair[0] = loop->sockpair[1] = -1;

    // NOTE: init start_time here, because htimer_add uses it.
    loop->start_ms = gettimeofday_ms();
    loop->start_hrtime = loop->cur_hrtime = gethrtime_us();
}
static void hloop_cleanup(hloop_t* loop) {
    // pendings
    printd("cleanup pendings...\n");
    for (int i = 0; i < HEVENT_PRIORITY_SIZE; ++i) {
        loop->pendings[i] = NULL;
    }

    // ios
    printd("cleanup ios...\n");
    for (int i = 0; i < loop->ios.maxsize; ++i) {
        hio_t* io = loop->ios.ptr[i];
        if (io) {
            hio_free(io);
        }
    }
    io_array_cleanup(&loop->ios);

    // idles
    printd("cleanup idles...\n");
    struct list_node* node = loop->idles.next;
    hidle_t* idle;
    while (node != &loop->idles) {
        idle = IDLE_ENTRY(node);
        node = node->next;
        HV_FREE(idle);
    }
    list_init(&loop->idles);

    // timers
    printd("cleanup timers...\n");
    htimer_t* timer;
    while (loop->timers.root) {
        timer = TIMER_ENTRY(loop->timers.root);
        heap_dequeue(&loop->timers);
        HV_FREE(timer);
    }
    heap_init(&loop->timers, NULL);

    // readbuf
    if (loop->readbuf.base && loop->readbuf.len) {
        HV_FREE(loop->readbuf.base);
        loop->readbuf.base = NULL;
        loop->readbuf.len = 0;
    }

    // iowatcher
    iowatcher_cleanup(loop);

    // custom_events
    hmutex_lock(&loop->custom_events_mutex);
    hloop_destroy_sockpair(loop);
    event_queue_cleanup(&loop->custom_events);
    hmutex_unlock(&loop->custom_events_mutex);
    hmutex_destroy(&loop->custom_events_mutex);
}
hloop_t* hloop_new(int flags) {
    hloop_t* loop;
    HV_ALLOC_SIZEOF(loop);
    hloop_init(loop);
    loop->flags |= flags;
    return loop;
}

void hloop_free(hloop_t** pp) {
    if (pp && *pp) {
        hloop_cleanup(*pp);
        HV_FREE(*pp);
        *pp = NULL;
    }
}
// while (loop->status) { hloop_process_events(loop); }
int hloop_run(hloop_t* loop) {
    if (loop == NULL) return -1;
    if (loop->status == HLOOP_STATUS_RUNNING) return -2;

    loop->status = HLOOP_STATUS_RUNNING;
    loop->pid = hv_getpid();
    loop->tid = hv_gettid();

    if (loop->intern_nevents == 0) {
        hmutex_lock(&loop->custom_events_mutex);
        if (loop->sockpair[SOCKPAIR_WRITE_INDEX] == -1) {
            hloop_create_sockpair(loop);
        }
        hmutex_unlock(&loop->custom_events_mutex);

#ifdef DEBUG
        htimer_add(loop, hloop_stat_timer_cb, HLOOP_STAT_TIMEOUT, INFINITE);
        ++loop->intern_nevents;
#endif
    }

    while (loop->status != HLOOP_STATUS_STOP) {
        if (loop->status == HLOOP_STATUS_PAUSE) {
            hv_msleep(HLOOP_PAUSE_TIME);
            hloop_update_time(loop);
            continue;
        }
        ++loop->loop_cnt;
        if ((loop->flags & HLOOP_FLAG_QUIT_WHEN_NO_ACTIVE_EVENTS) &&
            loop->nactives <= loop->intern_nevents) {
            break;
        }
        hloop_process_events(loop);
        if (loop->flags & HLOOP_FLAG_RUN_ONCE) {
            break;
        }
    }

    loop->status = HLOOP_STATUS_STOP;
    loop->end_hrtime = gethrtime_us();

    if (loop->flags & HLOOP_FLAG_AUTO_FREE) {
        hloop_cleanup(loop);
        HV_FREE(loop);
    }
    return 0;
}
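
// Usage sketch (not part of this file): a minimal loop lifecycle, assuming only the public API
// defined in this file and declared in hloop.h. on_timer is a hypothetical callback name.
/*
    static void on_timer(htimer_t* timer) {
        printf("tick, now=%llums\n", (unsigned long long)hloop_now_ms(timer->loop));
    }

    int main() {
        hloop_t* loop = hloop_new(0);
        htimer_add(loop, on_timer, 1000, INFINITE); // fire every 1000 ms
        hloop_run(loop);                            // blocks until hloop_stop is called
        hloop_free(&loop);
        return 0;
    }
*/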
int hloop_wakeup(hloop_t* loop) {
    hevent_t ev;
    memset(&ev, 0, sizeof(ev));
    hloop_post_event(loop, &ev);
    return 0;
}

int hloop_stop(hloop_t* loop) {
    if (hv_gettid() != loop->tid) {
        hloop_wakeup(loop);
    }
    loop->status = HLOOP_STATUS_STOP;
    return 0;
}

int hloop_pause(hloop_t* loop) {
    if (loop->status == HLOOP_STATUS_RUNNING) {
        loop->status = HLOOP_STATUS_PAUSE;
    }
    return 0;
}

int hloop_resume(hloop_t* loop) {
    if (loop->status == HLOOP_STATUS_PAUSE) {
        loop->status = HLOOP_STATUS_RUNNING;
    }
    return 0;
}

hloop_status_e hloop_status(hloop_t* loop) {
    return loop->status;
}
void hloop_update_time(hloop_t* loop) {
    loop->cur_hrtime = gethrtime_us();
    if (ABS((int64_t)hloop_now(loop) - (int64_t)time(NULL)) > 1) {
        // system time changed, readjust start_ms
        loop->start_ms = gettimeofday_ms() - (loop->cur_hrtime - loop->start_hrtime) / 1000;
    }
}

uint64_t hloop_now(hloop_t* loop) {
    return loop->start_ms / 1000 + (loop->cur_hrtime - loop->start_hrtime) / 1000000;
}

uint64_t hloop_now_ms(hloop_t* loop) {
    return loop->start_ms + (loop->cur_hrtime - loop->start_hrtime) / 1000;
}

uint64_t hloop_now_hrtime(hloop_t* loop) {
    return loop->start_ms * 1000 + (loop->cur_hrtime - loop->start_hrtime);
}
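
// NOTE (added commentary): the loop clock is a wall-clock anchor plus a monotonic offset:
// start_ms (ms at init) + (cur_hrtime - start_hrtime) of elapsed monotonic time (us).
// hloop_now returns seconds, hloop_now_ms milliseconds, hloop_now_hrtime microseconds.
// Timers compare against hloop_now_hrtime, so they keep their cadence even if the system clock
// jumps; hloop_update_time only resynchronizes the start_ms anchor in that case.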
uint64_t hio_last_read_time(hio_t* io) {
    hloop_t* loop = io->loop;
    return loop->start_ms + (io->last_read_hrtime - loop->start_hrtime) / 1000;
}

uint64_t hio_last_write_time(hio_t* io) {
    hloop_t* loop = io->loop;
    return loop->start_ms + (io->last_write_hrtime - loop->start_hrtime) / 1000;
}

long hloop_pid(hloop_t* loop) {
    return loop->pid;
}

long hloop_tid(hloop_t* loop) {
    return loop->tid;
}

void hloop_set_userdata(hloop_t* loop, void* userdata) {
    loop->userdata = userdata;
}

void* hloop_userdata(hloop_t* loop) {
    return loop->userdata;
}
hidle_t* hidle_add(hloop_t* loop, hidle_cb cb, uint32_t repeat) {
    hidle_t* idle;
    HV_ALLOC_SIZEOF(idle);
    idle->event_type = HEVENT_TYPE_IDLE;
    idle->priority = HEVENT_LOWEST_PRIORITY;
    idle->repeat = repeat;
    list_add(&idle->node, &loop->idles);
    EVENT_ADD(loop, idle, cb);
    loop->nidles++;
    return idle;
}
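
// Usage sketch (not part of this file): idle events only run on iterations where no io/timer
// callbacks are already pending (see hloop_process_events). on_idle is a hypothetical name.
//     hidle_add(loop, on_idle, INFINITE);   // run on every idle iteration
//     hidle_add(loop, on_idle, 1);          // run once, then auto-destroy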
static void __hidle_del(hidle_t* idle) {
    if (idle->destroy) return;
    idle->destroy = 1;
    list_del(&idle->node);
    idle->loop->nidles--;
}

void hidle_del(hidle_t* idle) {
    if (!idle->active) return;
    __hidle_del(idle);
    EVENT_DEL(idle);
}
htimer_t* htimer_add(hloop_t* loop, htimer_cb cb, uint32_t timeout, uint32_t repeat) {
    if (timeout == 0) return NULL;
    htimeout_t* timer;
    HV_ALLOC_SIZEOF(timer);
    timer->event_type = HEVENT_TYPE_TIMEOUT;
    timer->priority = HEVENT_HIGHEST_PRIORITY;
    timer->repeat = repeat;
    timer->timeout = timeout;
    hloop_update_time(loop);
    timer->next_timeout = hloop_now_hrtime(loop) + (uint64_t)timeout * 1000;
    // NOTE: Limit granularity to 100ms
    if (timeout >= 1000 && timeout % 100 == 0) {
        timer->next_timeout = timer->next_timeout / 100000 * 100000;
    }
    heap_insert(&loop->timers, &timer->node);
    EVENT_ADD(loop, timer, cb);
    loop->ntimers++;
    return (htimer_t*)timer;
}
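
// Usage sketch (not part of this file): timeout is in milliseconds and repeat counts firings
// (INFINITE repeats forever). on_once and on_every_5s are hypothetical callback names.
//     htimer_add(loop, on_once, 3000, 1);                        // fire once after 3 s, then auto-destroy
//     htimer_t* t = htimer_add(loop, on_every_5s, 5000, INFINITE);
//     htimer_del(t);                                             // cancel it later if needed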
void htimer_reset(htimer_t* timer) {
    if (timer->event_type != HEVENT_TYPE_TIMEOUT) {
        return;
    }
    hloop_t* loop = timer->loop;
    htimeout_t* timeout = (htimeout_t*)timer;
    if (timer->destroy) {
        loop->ntimers++;
    } else {
        heap_remove(&loop->timers, &timer->node);
    }
    if (timer->repeat == 0) {
        timer->repeat = 1;
    }
    timer->next_timeout = hloop_now_hrtime(loop) + (uint64_t)timeout->timeout * 1000;
    // NOTE: Limit granularity to 100ms
    if (timeout->timeout >= 1000 && timeout->timeout % 100 == 0) {
        timer->next_timeout = timer->next_timeout / 100000 * 100000;
    }
    heap_insert(&loop->timers, &timer->node);
    EVENT_RESET(timer);
}
htimer_t* htimer_add_period(hloop_t* loop, htimer_cb cb,
        int8_t minute, int8_t hour, int8_t day,
        int8_t week, int8_t month, uint32_t repeat) {
    if (minute > 59 || hour > 23 || day > 31 || week > 6 || month > 12) {
        return NULL;
    }
    hperiod_t* timer;
    HV_ALLOC_SIZEOF(timer);
    timer->event_type = HEVENT_TYPE_PERIOD;
    timer->priority = HEVENT_HIGH_PRIORITY;
    timer->repeat = repeat;
    timer->minute = minute;
    timer->hour   = hour;
    timer->day    = day;
    timer->month  = month;
    timer->week   = week;
    timer->next_timeout = (uint64_t)cron_next_timeout(minute, hour, day, week, month) * 1000000;
    heap_insert(&loop->timers, &timer->node);
    EVENT_ADD(loop, timer, cb);
    loop->ntimers++;
    return (htimer_t*)timer;
}
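
// Usage sketch (not part of this file): cron-style scheduling, assuming -1 means "any" for a field
// as documented in hloop.h. on_daily_report is a hypothetical callback name.
//     // every day at 03:30
//     htimer_add_period(loop, on_daily_report, 30, 3, -1, -1, -1, INFINITE);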
static void __htimer_del(htimer_t* timer) {
    if (timer->destroy) return;
    heap_remove(&timer->loop->timers, &timer->node);
    timer->loop->ntimers--;
    timer->destroy = 1;
}

void htimer_del(htimer_t* timer) {
    if (!timer->active) return;
    __htimer_del(timer);
    EVENT_DEL(timer);
}

const char* hio_engine() {
#ifdef EVENT_SELECT
    return "select";
#elif defined(EVENT_POLL)
    return "poll";
#elif defined(EVENT_EPOLL)
    return "epoll";
#elif defined(EVENT_KQUEUE)
    return "kqueue";
#elif defined(EVENT_IOCP)
    return "iocp";
#elif defined(EVENT_PORT)
    return "evport";
#else
    return "noevent";
#endif
}
hio_t* hio_get(hloop_t* loop, int fd) {
    if (fd >= loop->ios.maxsize) {
        int newsize = ceil2e(fd);
        io_array_resize(&loop->ios, newsize > fd ? newsize : 2*fd);
    }

    hio_t* io = loop->ios.ptr[fd];
    if (io == NULL) {
        HV_ALLOC_SIZEOF(io);
        hio_init(io);
        io->event_type = HEVENT_TYPE_IO;
        io->loop = loop;
        io->fd = fd;
        loop->ios.ptr[fd] = io;
    }

    if (!io->ready) {
        hio_ready(io);
    }

    return io;
}

void hio_detach(hio_t* io) {
    hloop_t* loop = io->loop;
    int fd = io->fd;
    assert(loop != NULL && fd < loop->ios.maxsize);
    loop->ios.ptr[fd] = NULL;
}
void hio_attach(hloop_t* loop, hio_t* io) {
    int fd = io->fd;
    if (fd >= loop->ios.maxsize) {
        int newsize = ceil2e(fd);
        io_array_resize(&loop->ios, newsize > fd ? newsize : 2*fd);
    }

    // NOTE: a closed hio is kept around so it can be reused, but an attached hio cannot be reused,
    // so free any hio already stored for this fd to avoid a memory leak.
    hio_t* preio = loop->ios.ptr[fd];
    if (preio != NULL && preio != io) {
        hio_free(preio);
    }

    io->loop = loop;
    // NOTE: use the new loop's readbuf
    io->readbuf.base = loop->readbuf.base;
    io->readbuf.len = loop->readbuf.len;
    loop->ios.ptr[fd] = io;
}

bool hio_exists(hloop_t* loop, int fd) {
    if (fd >= loop->ios.maxsize) {
        return false;
    }
    return loop->ios.ptr[fd] != NULL;
}
int hio_add(hio_t* io, hio_cb cb, int events) {
    printd("hio_add fd=%d io->events=%d events=%d\n", io->fd, io->events, events);
#ifdef OS_WIN
    // NOTE: the Windows iowatcher does not work on stdio
    if (io->fd < 3) return -1;
#endif
    hloop_t* loop = io->loop;
    if (!io->active) {
        EVENT_ADD(loop, io, cb);
        loop->nios++;
    }

    if (!io->ready) {
        hio_ready(io);
    }

    if (cb) {
        io->cb = (hevent_cb)cb;
    }

    if (!(io->events & events)) {
        iowatcher_add_event(loop, io->fd, events);
        io->events |= events;
    }
    return 0;
}

int hio_del(hio_t* io, int events) {
    printd("hio_del fd=%d io->events=%d events=%d\n", io->fd, io->events, events);
#ifdef OS_WIN
    // NOTE: the Windows iowatcher does not work on stdio
    if (io->fd < 3) return -1;
#endif
    if (!io->active) return -1;

    if (io->events & events) {
        iowatcher_del_event(io->loop, io->fd, events);
        io->events &= ~events;
    }
    if (io->events == 0) {
        io->loop->nios--;
        // NOTE: EVENT_INACTIVE instead of EVENT_DEL, to avoid freeing the io
        EVENT_INACTIVE(io);
    }
    return 0;
}
static void hio_close_event_cb(hevent_t* ev) {
    hio_t* io = (hio_t*)ev->userdata;
    uint32_t id = (uintptr_t)ev->privdata;
    if (io->id != id) return;
    hio_close(io);
}

int hio_close_async(hio_t* io) {
    hevent_t ev;
    memset(&ev, 0, sizeof(ev));
    ev.cb = hio_close_event_cb;
    ev.userdata = io;
    ev.privdata = (void*)(uintptr_t)io->id;
    hloop_post_event(io->loop, &ev);
    return 0;
}
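
// NOTE (added commentary): hio_close_async captures io->id in the posted event, and
// hio_close_event_cb re-checks it before closing. If the hio slot has been reused for a new
// connection by the time the event runs, the ids no longer match and the stale close is ignored.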
//------------------high-level apis-------------------------------------------
hio_t* hread(hloop_t* loop, int fd, void* buf, size_t len, hread_cb read_cb) {
    hio_t* io = hio_get(loop, fd);
    assert(io != NULL);
    if (buf && len) {
        io->readbuf.base = (char*)buf;
        io->readbuf.len = len;
    }
    if (read_cb) {
        io->read_cb = read_cb;
    }
    hio_read(io);
    return io;
}

hio_t* hwrite(hloop_t* loop, int fd, const void* buf, size_t len, hwrite_cb write_cb) {
    hio_t* io = hio_get(loop, fd);
    assert(io != NULL);
    if (write_cb) {
        io->write_cb = write_cb;
    }
    hio_write(io, buf, len);
    return io;
}

hio_t* haccept(hloop_t* loop, int listenfd, haccept_cb accept_cb) {
    hio_t* io = hio_get(loop, listenfd);
    assert(io != NULL);
    if (accept_cb) {
        io->accept_cb = accept_cb;
    }
    hio_accept(io);
    return io;
}

hio_t* hconnect (hloop_t* loop, int connfd, hconnect_cb connect_cb) {
    hio_t* io = hio_get(loop, connfd);
    assert(io != NULL);
    if (connect_cb) {
        io->connect_cb = connect_cb;
    }
    hio_connect(io);
    return io;
}

void hclose (hloop_t* loop, int fd) {
    hio_t* io = hio_get(loop, fd);
    assert(io != NULL);
    hio_close(io);
}

hio_t* hrecv (hloop_t* loop, int connfd, void* buf, size_t len, hread_cb read_cb) {
    //hio_t* io = hio_get(loop, connfd);
    //assert(io != NULL);
    //io->recv = 1;
    //if (io->io_type != HIO_TYPE_SSL) {
    //    io->io_type = HIO_TYPE_TCP;
    //}
    return hread(loop, connfd, buf, len, read_cb);
}

hio_t* hsend (hloop_t* loop, int connfd, const void* buf, size_t len, hwrite_cb write_cb) {
    //hio_t* io = hio_get(loop, connfd);
    //assert(io != NULL);
    //io->send = 1;
    //if (io->io_type != HIO_TYPE_SSL) {
    //    io->io_type = HIO_TYPE_TCP;
    //}
    return hwrite(loop, connfd, buf, len, write_cb);
}

hio_t* hrecvfrom (hloop_t* loop, int sockfd, void* buf, size_t len, hread_cb read_cb) {
    //hio_t* io = hio_get(loop, sockfd);
    //assert(io != NULL);
    //io->recvfrom = 1;
    //io->io_type = HIO_TYPE_UDP;
    return hread(loop, sockfd, buf, len, read_cb);
}

hio_t* hsendto (hloop_t* loop, int sockfd, const void* buf, size_t len, hwrite_cb write_cb) {
    //hio_t* io = hio_get(loop, sockfd);
    //assert(io != NULL);
    //io->sendto = 1;
    //io->io_type = HIO_TYPE_UDP;
    return hwrite(loop, sockfd, buf, len, write_cb);
}
//-----------------top-level apis---------------------------------------------
hio_t* hio_create_socket(hloop_t* loop, const char* host, int port, hio_type_e type, hio_side_e side) {
    int sock_type = type & HIO_TYPE_SOCK_STREAM ? SOCK_STREAM :
                    type & HIO_TYPE_SOCK_DGRAM  ? SOCK_DGRAM  :
                    type & HIO_TYPE_SOCK_RAW    ? SOCK_RAW    : -1;
    if (sock_type == -1) return NULL;

    sockaddr_u addr;
    memset(&addr, 0, sizeof(addr));
    int ret = -1;
#ifdef ENABLE_UDS
    if (port <= 0) {
        sockaddr_set_path(&addr, host);
        ret = 0;
    }
#endif
    if (port > 0) {
        ret = sockaddr_set_ipport(&addr, host, port);
    }
    if (ret != 0) {
        // fprintf(stderr, "unknown host: %s\n", host);
        return NULL;
    }

    int sockfd = socket(addr.sa.sa_family, sock_type, 0);
    if (sockfd < 0) {
        perror("socket");
        return NULL;
    }

    hio_t* io = NULL;
    if (side == HIO_SERVER_SIDE) {
#ifdef SO_REUSEADDR
        // NOTE: SO_REUSEADDR allows rebinding an address that is still in TIME_WAIT
        int reuseaddr = 1;
        if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuseaddr, sizeof(int)) < 0) {
            perror("setsockopt");
            closesocket(sockfd);
            return NULL;
        }
#endif
        if (bind(sockfd, &addr.sa, sockaddr_len(&addr)) < 0) {
            perror("bind");
            closesocket(sockfd);
            return NULL;
        }
        if (sock_type == SOCK_STREAM) {
            if (listen(sockfd, SOMAXCONN) < 0) {
                perror("listen");
                closesocket(sockfd);
                return NULL;
            }
        }
    }

    io = hio_get(loop, sockfd);
    assert(io != NULL);
    io->io_type = type;
    if (side == HIO_SERVER_SIDE) {
        hio_set_localaddr(io, &addr.sa, sockaddr_len(&addr));
    } else {
        hio_set_peeraddr(io, &addr.sa, sockaddr_len(&addr));
    }
    return io;
}
hio_t* hloop_create_tcp_server (hloop_t* loop, const char* host, int port, haccept_cb accept_cb) {
    hio_t* io = hio_create_socket(loop, host, port, HIO_TYPE_TCP, HIO_SERVER_SIDE);
    if (io == NULL) return NULL;
    hio_setcb_accept(io, accept_cb);
    hio_accept(io);
    return io;
}
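
// Usage sketch (not part of this file): a minimal TCP echo server built on hloop_create_tcp_server.
// Assumes hio_setcb_read/hio_read/hio_write as declared in hloop.h; on_accept and on_message are
// hypothetical names used for illustration.
/*
    static void on_message(hio_t* io, void* buf, int readbytes) {
        hio_write(io, buf, readbytes); // echo back
    }
    static void on_accept(hio_t* io) {
        hio_setcb_read(io, on_message);
        hio_read(io);
    }

    hloop_t* loop = hloop_new(0);
    hloop_create_tcp_server(loop, "0.0.0.0", 1234, on_accept);
    hloop_run(loop);
    hloop_free(&loop);
*/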
hio_t* hloop_create_tcp_client (hloop_t* loop, const char* host, int port, hconnect_cb connect_cb) {
    hio_t* io = hio_create_socket(loop, host, port, HIO_TYPE_TCP, HIO_CLIENT_SIDE);
    if (io == NULL) return NULL;
    hio_setcb_connect(io, connect_cb);
    hio_connect(io);
    return io;
}

hio_t* hloop_create_ssl_server (hloop_t* loop, const char* host, int port, haccept_cb accept_cb) {
    hio_t* io = hio_create_socket(loop, host, port, HIO_TYPE_SSL, HIO_SERVER_SIDE);
    if (io == NULL) return NULL;
    hio_setcb_accept(io, accept_cb);
    hio_accept(io);
    return io;
}

hio_t* hloop_create_ssl_client (hloop_t* loop, const char* host, int port, hconnect_cb connect_cb) {
    hio_t* io = hio_create_socket(loop, host, port, HIO_TYPE_SSL, HIO_CLIENT_SIDE);
    if (io == NULL) return NULL;
    hio_setcb_connect(io, connect_cb);
    hio_connect(io);
    return io;
}

hio_t* hloop_create_udp_server(hloop_t* loop, const char* host, int port) {
    return hio_create_socket(loop, host, port, HIO_TYPE_UDP, HIO_SERVER_SIDE);
}

hio_t* hloop_create_udp_client(hloop_t* loop, const char* host, int port) {
    return hio_create_socket(loop, host, port, HIO_TYPE_UDP, HIO_CLIENT_SIDE);
}