// nio.c - non-blocking io implementation (non-IOCP backends)
  1. #include "iowatcher.h"
  2. #ifndef EVENT_IOCP
  3. #include "hevent.h"
  4. #include "hsocket.h"
  5. #include "hssl.h"
  6. #include "hlog.h"
  7. #include "hthread.h"
  8. #include "unpack.h"
  9. static void __connect_timeout_cb(htimer_t* timer) {
  10. hio_t* io = (hio_t*)timer->privdata;
  11. if (io) {
  12. char localaddrstr[SOCKADDR_STRLEN] = {0};
  13. char peeraddrstr[SOCKADDR_STRLEN] = {0};
  14. hlogw("connect timeout [%s] <=> [%s]",
  15. SOCKADDR_STR(io->localaddr, localaddrstr),
  16. SOCKADDR_STR(io->peeraddr, peeraddrstr));
  17. io->error = ETIMEDOUT;
  18. hio_close(io);
  19. }
  20. }
  21. static void __close_timeout_cb(htimer_t* timer) {
  22. hio_t* io = (hio_t*)timer->privdata;
  23. if (io) {
  24. char localaddrstr[SOCKADDR_STRLEN] = {0};
  25. char peeraddrstr[SOCKADDR_STRLEN] = {0};
  26. hlogw("close timeout [%s] <=> [%s]",
  27. SOCKADDR_STR(io->localaddr, localaddrstr),
  28. SOCKADDR_STR(io->peeraddr, peeraddrstr));
  29. io->error = ETIMEDOUT;
  30. hio_close(io);
  31. }
  32. }
// Dispatch the user's accept callback for a newly accepted connection.
static void __accept_cb(hio_t* io) {
    hio_accept_cb(io);
}
// Connection established: cancel the connect-timeout timer, then notify the user.
static void __connect_cb(hio_t* io) {
    hio_del_connect_timer(io);
    hio_connect_cb(io);
}
  40. static void __read_cb(hio_t* io, void* buf, int readbytes) {
  41. // printd("> %.*s\n", readbytes, buf);
  42. if (io->keepalive_timer) {
  43. htimer_reset(io->keepalive_timer);
  44. }
  45. if (io->unpack_setting) {
  46. hio_unpack(io, buf, readbytes);
  47. } else {
  48. if (io->read_once) {
  49. hio_read_stop(io);
  50. }
  51. #if WITH_KCP
  52. if (io->io_type == HIO_TYPE_KCP) {
  53. hio_read_kcp(io, buf, readbytes);
  54. return;
  55. }
  56. #endif
  57. hio_read_cb(io, buf, readbytes);
  58. }
  59. // readbuf autosize
  60. if (io->small_readbytes_cnt >= 3) {
  61. io->small_readbytes_cnt = 0;
  62. size_t small_size = io->readbuf.len / 2;
  63. io->readbuf.base = (char*)safe_realloc(io->readbuf.base, small_size, io->readbuf.len);
  64. io->readbuf.len = small_size;
  65. }
  66. }
// Internal write-complete hook: refresh the keepalive timer (activity seen),
// then invoke the user's write callback.
static void __write_cb(hio_t* io, const void* buf, int writebytes) {
    // printd("< %.*s\n", writebytes, buf);
    if (io->keepalive_timer) {
        htimer_reset(io->keepalive_timer);
    }
    hio_write_cb(io, buf, writebytes);
}
// Internal close hook: cancel every per-io timer (connect/close/keepalive/
// heartbeat) before dispatching the user's close callback.
static void __close_cb(hio_t* io) {
    // printd("close fd=%d\n", io->fd);
    hio_del_connect_timer(io);
    hio_del_close_timer(io);
    hio_del_keepalive_timer(io);
    hio_del_heartbeat_timer(io);
    hio_close_cb(io);
}
// Drive the server-side SSL/TLS handshake, re-arming itself on HV_READ until
// hssl_accept() completes. On success the accept callback is finally fired;
// on fatal error the io is closed.
static void ssl_server_handshake(hio_t* io) {
    printd("ssl server handshake...\n");
    int ret = hssl_accept(io->ssl);
    if (ret == 0) {
        // handshake finish
        // Stop watching READ and drop our temporary handshake cb so normal
        // event dispatch takes over again.
        iowatcher_del_event(io->loop, io->fd, HV_READ);
        io->events &= ~HV_READ;
        io->cb = NULL;
        printd("ssl handshake finished.\n");
        __accept_cb(io);
    }
    else if (ret == HSSL_WANT_READ) {
        // Handshake needs more peer data: wait for readability, then re-enter.
        if ((io->events & HV_READ) == 0) {
            hio_add(io, ssl_server_handshake, HV_READ);
        }
    }
    else {
        hloge("ssl handshake failed: %d", ret);
        hio_close(io);
    }
}
// Drive the client-side SSL/TLS handshake, re-arming itself on HV_READ until
// hssl_connect() completes. On success the connect callback is finally fired;
// on fatal error the io is closed.
static void ssl_client_handshake(hio_t* io) {
    printd("ssl client handshake...\n");
    int ret = hssl_connect(io->ssl);
    if (ret == 0) {
        // handshake finish
        // Stop watching READ and drop our temporary handshake cb so normal
        // event dispatch takes over again.
        iowatcher_del_event(io->loop, io->fd, HV_READ);
        io->events &= ~HV_READ;
        io->cb = NULL;
        printd("ssl handshake finished.\n");
        __connect_cb(io);
    }
    else if (ret == HSSL_WANT_READ) {
        // Handshake needs more peer data: wait for readability, then re-enter.
        if ((io->events & HV_READ) == 0) {
            hio_add(io, ssl_client_handshake, HV_READ);
        }
    }
    else {
        hloge("ssl handshake failed: %d", ret);
        hio_close(io);
    }
}
  124. static void nio_accept(hio_t* io) {
  125. // printd("nio_accept listenfd=%d\n", io->fd);
  126. int connfd = 0, err = 0;
  127. socklen_t addrlen;
  128. accept:
  129. addrlen = sizeof(sockaddr_u);
  130. connfd = accept(io->fd, io->peeraddr, &addrlen);
  131. hio_t* connio = NULL;
  132. if (connfd < 0) {
  133. err = socket_errno();
  134. if (err == EAGAIN) {
  135. //goto accept_done;
  136. return;
  137. } else {
  138. perror("accept");
  139. io->error = err;
  140. goto accept_error;
  141. }
  142. }
  143. addrlen = sizeof(sockaddr_u);
  144. getsockname(connfd, io->localaddr, &addrlen);
  145. connio = hio_get(io->loop, connfd);
  146. // NOTE: inherit from listenio
  147. connio->accept_cb = io->accept_cb;
  148. connio->userdata = io->userdata;
  149. if (io->unpack_setting) {
  150. hio_set_unpack(connio, io->unpack_setting);
  151. }
  152. if (io->io_type == HIO_TYPE_SSL) {
  153. if (connio->ssl == NULL) {
  154. hssl_ctx_t ssl_ctx = hssl_ctx_instance();
  155. if (ssl_ctx == NULL) {
  156. goto accept_error;
  157. }
  158. hssl_t ssl = hssl_new(ssl_ctx, connfd);
  159. if (ssl == NULL) {
  160. goto accept_error;
  161. }
  162. connio->ssl = ssl;
  163. }
  164. hio_enable_ssl(connio);
  165. ssl_server_handshake(connio);
  166. }
  167. else {
  168. // NOTE: SSL call accept_cb after handshake finished
  169. __accept_cb(connio);
  170. }
  171. goto accept;
  172. accept_error:
  173. hio_close(io);
  174. }
// Write-ready handler after a non-blocking connect(): uses getpeername() to
// decide whether the connect succeeded. On success, records local address and
// either starts the SSL handshake (connect_cb deferred) or fires connect_cb
// directly. On failure the io is closed.
static void nio_connect(hio_t* io) {
    // printd("nio_connect connfd=%d\n", io->fd);
    socklen_t addrlen = sizeof(sockaddr_u);
    // getpeername fails iff the connect did not complete successfully.
    int ret = getpeername(io->fd, io->peeraddr, &addrlen);
    if (ret < 0) {
        io->error = socket_errno();
        printd("connect failed: %s: %d\n", strerror(io->error), io->error);
        goto connect_failed;
    }
    else {
        addrlen = sizeof(sockaddr_u);
        getsockname(io->fd, io->localaddr, &addrlen);
        if (io->io_type == HIO_TYPE_SSL) {
            // Lazily create the SSL object on first use.
            if (io->ssl == NULL) {
                hssl_ctx_t ssl_ctx = hssl_ctx_instance();
                if (ssl_ctx == NULL) {
                    goto connect_failed;
                }
                hssl_t ssl = hssl_new(ssl_ctx, io->fd);
                if (ssl == NULL) {
                    goto connect_failed;
                }
                io->ssl = ssl;
            }
            ssl_client_handshake(io);
        }
        else {
            // NOTE: SSL call connect_cb after handshake finished
            __connect_cb(io);
        }
        return;
    }
connect_failed:
    hio_close(io);
}
  210. static int __nio_read(hio_t* io, void* buf, int len) {
  211. int nread = 0;
  212. switch (io->io_type) {
  213. case HIO_TYPE_SSL:
  214. nread = hssl_read(io->ssl, buf, len);
  215. break;
  216. case HIO_TYPE_TCP:
  217. #ifdef OS_UNIX
  218. nread = read(io->fd, buf, len);
  219. #else
  220. nread = recv(io->fd, buf, len, 0);
  221. #endif
  222. break;
  223. case HIO_TYPE_UDP:
  224. case HIO_TYPE_KCP:
  225. case HIO_TYPE_IP:
  226. {
  227. socklen_t addrlen = sizeof(sockaddr_u);
  228. nread = recvfrom(io->fd, buf, len, 0, io->peeraddr, &addrlen);
  229. }
  230. break;
  231. default:
  232. nread = read(io->fd, buf, len);
  233. break;
  234. }
  235. // hlogd("read retval=%d", nread);
  236. return nread;
  237. }
  238. static int __nio_write(hio_t* io, const void* buf, int len) {
  239. int nwrite = 0;
  240. switch (io->io_type) {
  241. case HIO_TYPE_SSL:
  242. nwrite = hssl_write(io->ssl, buf, len);
  243. break;
  244. case HIO_TYPE_TCP:
  245. #ifdef OS_UNIX
  246. nwrite = write(io->fd, buf, len);
  247. #else
  248. nwrite = send(io->fd, buf, len, 0);
  249. #endif
  250. break;
  251. case HIO_TYPE_UDP:
  252. case HIO_TYPE_KCP:
  253. case HIO_TYPE_IP:
  254. nwrite = sendto(io->fd, buf, len, 0, io->peeraddr, SOCKADDR_LEN(io->peeraddr));
  255. break;
  256. default:
  257. nwrite = write(io->fd, buf, len);
  258. break;
  259. }
  260. // hlogd("write retval=%d", nwrite);
  261. return nwrite;
  262. }
// Read-ready handler: pull data from the transport into readbuf and deliver
// it via __read_cb. In read_until mode, bytes accumulate at readbuf.offset
// until exactly read_until bytes have arrived. Loops while the buffer was
// filled completely (more data may be pending). Closes the io on error or EOF.
static void nio_read(hio_t* io) {
    // printd("nio_read fd=%d\n", io->fd);
    void* buf;
    int len = 0, nread = 0, err = 0;
read:
    buf = io->readbuf.base + io->readbuf.offset;
    if (io->read_until) {
        // Fixed-length mode: ask for exactly the remaining bytes.
        len = io->read_until;
    } else {
        len = io->readbuf.len - io->readbuf.offset;
    }
    nread = __nio_read(io, buf, len);
    // printd("read retval=%d\n", nread);
    if (nread < 0) {
        err = socket_errno();
        if (err == EAGAIN) {
            // No more data for now; wait for the next READ event.
            // goto read_done;
            return;
        } else if (err == EMSGSIZE) {
            // ignore
            // NOTE(review): presumably a datagram larger than the buffer;
            // deliberately dropped rather than treated as fatal — confirm.
            return;
        } else {
            // perror("read");
            io->error = err;
            goto read_error;
        }
    }
    if (nread == 0) {
        // Orderly shutdown by the peer.
        goto disconnect;
    }
    if (io->read_until) {
        io->readbuf.offset += nread;
        io->read_until -= nread;
        if (io->read_until == 0) {
            // Full requested length accumulated: deliver it in one piece.
            __read_cb(io, io->readbuf.base, io->readbuf.offset);
            io->readbuf.offset = 0;
        }
    } else {
        __read_cb(io, buf, nread);
        if (nread == len) {
            // Buffer was filled to capacity — more data may be queued.
            goto read;
        }
    }
    return;
read_error:
disconnect:
    hio_close(io);
}
// Write-ready handler: flush the write_queue head-first until it is empty or
// the socket would block. Buffers fully written are freed and popped. When the
// queue drains and a deferred close was requested (io->close), the close is
// performed now. Closes the io on write error or zero-length write.
static void nio_write(hio_t* io) {
    // printd("nio_write fd=%d\n", io->fd);
    int nwrite = 0, err = 0;
    hrecursive_mutex_lock(&io->write_mutex);
write:
    if (write_queue_empty(&io->write_queue)) {
        hrecursive_mutex_unlock(&io->write_mutex);
        if (io->close) {
            // Deferred close (see hio_close): queue has drained, close now.
            io->close = 0;
            hio_close(io);
        }
        return;
    }
    offset_buf_t* pbuf = write_queue_front(&io->write_queue);
    char* buf = pbuf->base + pbuf->offset;
    int len = pbuf->len - pbuf->offset;
    nwrite = __nio_write(io, buf, len);
    // printd("write retval=%d\n", nwrite);
    if (nwrite < 0) {
        err = socket_errno();
        if (err == EAGAIN) {
            // Kernel buffer full again; retry on the next WRITE event.
            //goto write_done;
            hrecursive_mutex_unlock(&io->write_mutex);
            return;
        } else {
            // perror("write");
            io->error = err;
            goto write_error;
        }
    }
    if (nwrite == 0) {
        goto disconnect;
    }
    __write_cb(io, buf, nwrite);
    pbuf->offset += nwrite;
    io->write_queue_bytes -= nwrite;
    if (nwrite == len) {
        // This buffer is fully flushed; free it (allocated in hio_write).
        HV_FREE(pbuf->base);
        write_queue_pop_front(&io->write_queue);
        // write next
        goto write;
    }
    hrecursive_mutex_unlock(&io->write_mutex);
    return;
write_error:
disconnect:
    hrecursive_mutex_unlock(&io->write_mutex);
    hio_close(io);
}
// Central event dispatcher for an io: routes READ events to accept/read and
// WRITE events to connect/write, then clears revents. The connect path is
// one-shot; the WRITE interest is dropped as soon as the write_queue is empty.
static void hio_handle_events(hio_t* io) {
    if ((io->events & HV_READ) && (io->revents & HV_READ)) {
        if (io->accept) {
            nio_accept(io);
        }
        else {
            nio_read(io);
        }
    }
    if ((io->events & HV_WRITE) && (io->revents & HV_WRITE)) {
        // NOTE: del HV_WRITE, if write_queue empty
        hrecursive_mutex_lock(&io->write_mutex);
        if (write_queue_empty(&io->write_queue)) {
            iowatcher_del_event(io->loop, io->fd, HV_WRITE);
            io->events &= ~HV_WRITE;
        }
        hrecursive_mutex_unlock(&io->write_mutex);
        if (io->connect) {
            // NOTE: connect just do once
            // ONESHOT
            io->connect = 0;
            nio_connect(io);
        }
        else {
            nio_write(io);
        }
    }
    // Events handled; reset so stale bits don't re-trigger.
    io->revents = 0;
}
// Start accepting on a listening io: mark it as a listener and register
// READ interest. Always returns 0.
int hio_accept(hio_t* io) {
    io->accept = 1;
    hio_add(io, hio_handle_events, HV_READ);
    return 0;
}
// Begin a (typically non-blocking) connect to io->peeraddr. An immediate
// success completes inline via nio_connect; an in-progress connect arms a
// connect-timeout timer and waits for writability. Returns 0 on success/
// pending registration, <0 on immediate failure (io is closed).
int hio_connect(hio_t* io) {
    int ret = connect(io->fd, io->peeraddr, SOCKADDR_LEN(io->peeraddr));
#ifdef OS_WIN
    if (ret < 0 && socket_errno() != WSAEWOULDBLOCK) {
#else
    if (ret < 0 && socket_errno() != EINPROGRESS) {
#endif
        perror("connect");
        hio_close(io);
        return ret;
    }
    if (ret == 0) {
        // connect ok
        nio_connect(io);
        return 0;
    }
    // Connect in progress: bound the wait with a one-shot timeout timer.
    int timeout = io->connect_timeout ? io->connect_timeout : HIO_DEFAULT_CONNECT_TIMEOUT;
    io->connect_timer = htimer_add(io->loop, __connect_timeout_cb, timeout, 1);
    io->connect_timer->privdata = io;
    io->connect = 1;
    // Completion (or failure) is reported via a WRITE event.
    return hio_add(io, hio_handle_events, HV_WRITE);
}
  416. int hio_read (hio_t* io) {
  417. if (io->closed) {
  418. hloge("hio_read called but fd[%d] already closed!", io->fd);
  419. return -1;
  420. }
  421. return hio_add(io, hio_handle_events, HV_READ);
  422. }
// Loop-thread event posted by hio_write when called from a foreign thread:
// resets the keepalive timer on the loop thread. The io id stored in privdata
// guards against the io having been closed and its slot reused in between.
static void hio_write_event_cb(hevent_t* ev) {
    hio_t* io = (hio_t*)ev->userdata;
    if (io->closed) return;
    uint32_t id = (uintptr_t)ev->privdata;
    if (io->id != id) return;
    if (io->keepalive_timer) {
        htimer_reset(io->keepalive_timer);
    }
}
// Write len bytes to the io. If the write_queue is empty, a direct write is
// attempted first; any remainder (or an EAGAIN) is copied into the queue and
// flushed later by nio_write on WRITE events. Thread-safe via write_mutex;
// keepalive-timer resets from foreign threads are marshalled to the loop
// thread through a posted event. Returns bytes written directly (possibly 0
// when fully queued) or -1 if the io is already closed; write errors close
// the io.
int hio_write (hio_t* io, const void* buf, size_t len) {
    if (io->closed) {
        hloge("hio_write called but fd[%d] already closed!", io->fd);
        return -1;
    }
#if WITH_KCP
    if (io->io_type == HIO_TYPE_KCP) {
        return hio_write_kcp(io, buf, len);
    }
#endif
    int nwrite = 0, err = 0;
    hrecursive_mutex_lock(&io->write_mutex);
    if (write_queue_empty(&io->write_queue)) {
try_write:
        nwrite = __nio_write(io, buf, len);
        // printd("write retval=%d\n", nwrite);
        if (nwrite < 0) {
            err = socket_errno();
            if (err == EAGAIN) {
                // Would block: queue the whole buffer instead.
                nwrite = 0;
                hlogw("try_write failed, enqueue!");
                goto enqueue;
            } else {
                // perror("write");
                io->error = err;
                goto write_error;
            }
        }
        if (nwrite == 0) {
            goto disconnect;
        }
        // __write_cb(io, buf, nwrite);
        if (io->keepalive_timer) {
            if (hv_gettid() == io->loop->tid) {
                // On the loop thread: safe to touch the timer directly.
                htimer_reset(io->keepalive_timer);
            } else {
                // Foreign thread: post an event so the timer is reset on
                // the loop thread (see hio_write_event_cb).
                hevent_t ev;
                memset(&ev, 0, sizeof(ev));
                ev.cb = hio_write_event_cb;
                ev.userdata = io;
                ev.privdata = (void*)(uintptr_t)io->id;
                ev.priority = HEVENT_HIGH_PRIORITY;
                hloop_post_event(io->loop, &ev);
            }
        }
        hio_write_cb(io, buf, nwrite);
        // NOTE(review): int vs size_t comparison — assumes len <= INT_MAX.
        if (nwrite == len) {
            //goto write_done;
            hrecursive_mutex_unlock(&io->write_mutex);
            return nwrite;
        }
enqueue:
        hio_add(io, hio_handle_events, HV_WRITE);
    }
    if (nwrite < len) {
        // Copy the unwritten tail; nio_write resumes from remain.offset.
        offset_buf_t remain;
        remain.len = len;
        remain.offset = nwrite;
        // NOTE: free in nio_write
        HV_ALLOC(remain.base, remain.len);
        memcpy(remain.base, buf, remain.len);
        if (io->write_queue.maxsize == 0) {
            write_queue_init(&io->write_queue, 4);
        }
        write_queue_push_back(&io->write_queue, &remain);
        io->write_queue_bytes += remain.len - remain.offset;
        if (io->write_queue_bytes > WRITE_QUEUE_HIGH_WATER) {
            hlogw("write queue %u, total %u, over high water %u",
                (unsigned int)(remain.len - remain.offset),
                (unsigned int)io->write_queue_bytes,
                (unsigned int)WRITE_QUEUE_HIGH_WATER);
        }
    }
    hrecursive_mutex_unlock(&io->write_mutex);
    return nwrite;
write_error:
disconnect:
    hrecursive_mutex_unlock(&io->write_mutex);
    hio_close(io);
    return nwrite;
}
// Close the io. Calls from foreign threads are marshalled to the loop thread
// via hio_close_async. If unsent data remains (and no error occurred), the
// close is deferred: nio_write closes once the queue drains, bounded by a
// close-timeout timer. Otherwise tears down immediately: done, close cb,
// free SSL, close the socket fd. Returns 0.
int hio_close (hio_t* io) {
    if (io->closed) return 0;
    if (hv_gettid() != io->loop->tid) {
        // Not on the loop thread: hand off to it.
        return hio_close_async(io);
    }
    hrecursive_mutex_lock(&io->write_mutex);
    if (!write_queue_empty(&io->write_queue) && io->error == 0 && io->close == 0) {
        hrecursive_mutex_unlock(&io->write_mutex);
        // NOTE(review): io->close is set after the unlock — looks benign
        // since we are on the loop thread, but verify against concurrent
        // hio_write callers.
        io->close = 1;
        hlogw("write_queue not empty, close later.");
        int timeout_ms = io->close_timeout ? io->close_timeout : HIO_DEFAULT_CLOSE_TIMEOUT;
        io->close_timer = htimer_add(io->loop, __close_timeout_cb, timeout_ms, 1);
        io->close_timer->privdata = io;
        return 0;
    }
    io->closed = 1;
    hio_done(io);
    __close_cb(io);
    if (io->ssl) {
        hssl_free(io->ssl);
        io->ssl = NULL;
    }
    if (io->io_type & HIO_TYPE_SOCKET) {
        closesocket(io->fd);
    }
    hrecursive_mutex_unlock(&io->write_mutex);
    return 0;
}
  541. #endif