// nio.c - non-blocking I/O event handling (non-IOCP backends)
  1. #include "iowatcher.h"
  2. #ifndef EVENT_IOCP
  3. #include "hevent.h"
  4. #include "hsocket.h"
  5. #include "hssl.h"
  6. #include "hlog.h"
  7. #include "herr.h"
  8. #include "hthread.h"
  9. static void __connect_timeout_cb(htimer_t* timer) {
  10. hio_t* io = (hio_t*)timer->privdata;
  11. if (io) {
  12. char localaddrstr[SOCKADDR_STRLEN] = {0};
  13. char peeraddrstr[SOCKADDR_STRLEN] = {0};
  14. hlogw("connect timeout [%s] <=> [%s]",
  15. SOCKADDR_STR(io->localaddr, localaddrstr),
  16. SOCKADDR_STR(io->peeraddr, peeraddrstr));
  17. io->error = ETIMEDOUT;
  18. hio_close(io);
  19. }
  20. }
  21. static void __close_timeout_cb(htimer_t* timer) {
  22. hio_t* io = (hio_t*)timer->privdata;
  23. if (io) {
  24. char localaddrstr[SOCKADDR_STRLEN] = {0};
  25. char peeraddrstr[SOCKADDR_STRLEN] = {0};
  26. hlogw("close timeout [%s] <=> [%s]",
  27. SOCKADDR_STR(io->localaddr, localaddrstr),
  28. SOCKADDR_STR(io->peeraddr, peeraddrstr));
  29. io->error = ETIMEDOUT;
  30. hio_close(io);
  31. }
  32. }
// Single entry point for delivering an accepted connection to the user
// (called directly for plain TCP, or after the TLS handshake for SSL).
static void __accept_cb(hio_t* io) {
    hio_accept_cb(io);
}
// Connect completed: cancel the pending connect-timeout timer first,
// then notify the user's connect callback.
static void __connect_cb(hio_t* io) {
    hio_del_connect_timer(io);
    hio_connect_cb(io);
}
// Deliver freshly read bytes: stamp the last-read time (consumed by the
// keepalive/heartbeat timers elsewhere), then run the read pipeline
// (unpack + user read callback).
static void __read_cb(hio_t* io, void* buf, int readbytes) {
    // printd("> %.*s\n", readbytes, buf);
    io->last_read_hrtime = io->loop->cur_hrtime;
    hio_handle_read(io, buf, readbytes);
}
// Report bytes written: stamp the last-write time (consumed by the
// keepalive/heartbeat timers elsewhere), then fire the user write callback.
static void __write_cb(hio_t* io, const void* buf, int writebytes) {
    // printd("< %.*s\n", writebytes, buf);
    io->last_write_hrtime = io->loop->cur_hrtime;
    hio_write_cb(io, buf, writebytes);
}
// Final close notification: cancel every timer that may still reference
// this io before invoking the user's close callback, so no timer can fire
// against a closed io.
static void __close_cb(hio_t* io) {
    // printd("close fd=%d\n", io->fd);
    hio_del_connect_timer(io);
    hio_del_close_timer(io);
    hio_del_read_timer(io);
    hio_del_write_timer(io);
    hio_del_keepalive_timer(io);
    hio_del_heartbeat_timer(io);
    hio_close_cb(io);
}
  60. static void ssl_server_handshake(hio_t* io) {
  61. printd("ssl server handshake...\n");
  62. int ret = hssl_accept(io->ssl);
  63. if (ret == 0) {
  64. // handshake finish
  65. iowatcher_del_event(io->loop, io->fd, HV_READ);
  66. io->events &= ~HV_READ;
  67. io->cb = NULL;
  68. printd("ssl handshake finished.\n");
  69. __accept_cb(io);
  70. }
  71. else if (ret == HSSL_WANT_READ) {
  72. if ((io->events & HV_READ) == 0) {
  73. hio_add(io, ssl_server_handshake, HV_READ);
  74. }
  75. }
  76. else {
  77. hloge("ssl handshake failed: %d", ret);
  78. io->error = ERR_SSL_HANDSHAKE;
  79. hio_close(io);
  80. }
  81. }
  82. static void ssl_client_handshake(hio_t* io) {
  83. printd("ssl client handshake...\n");
  84. int ret = hssl_connect(io->ssl);
  85. if (ret == 0) {
  86. // handshake finish
  87. iowatcher_del_event(io->loop, io->fd, HV_READ);
  88. io->events &= ~HV_READ;
  89. io->cb = NULL;
  90. printd("ssl handshake finished.\n");
  91. __connect_cb(io);
  92. }
  93. else if (ret == HSSL_WANT_READ) {
  94. if ((io->events & HV_READ) == 0) {
  95. hio_add(io, ssl_client_handshake, HV_READ);
  96. }
  97. }
  98. else {
  99. hloge("ssl handshake failed: %d", ret);
  100. io->error = ERR_SSL_HANDSHAKE;
  101. hio_close(io);
  102. }
  103. }
// Drain pending connections from a listening fd on a readable event.
// Each accepted connection inherits accept_cb/userdata/unpack settings
// from the listener; for SSL listeners the TLS handshake is started and
// accept_cb is deferred until it completes.
static void nio_accept(hio_t* io) {
    // printd("nio_accept listenfd=%d\n", io->fd);
    int connfd = 0, err = 0, accept_cnt = 0;
    socklen_t addrlen;
    hio_t* connio = NULL;
    // Cap at 3 accepts per event so one busy listener cannot starve the
    // other ios serviced in this loop iteration.
    while (accept_cnt++ < 3) {
        addrlen = sizeof(sockaddr_u);
        connfd = accept(io->fd, io->peeraddr, &addrlen);
        if (connfd < 0) {
            err = socket_errno();
            if (err == EAGAIN || err == EINTR) {
                // Backlog drained (or interrupted): not an error, try again
                // on the next readable event.
                return;
            } else {
                perror("accept");
                io->error = err;
                goto accept_error;
            }
        }
        addrlen = sizeof(sockaddr_u);
        getsockname(connfd, io->localaddr, &addrlen);
        connio = hio_get(io->loop, connfd);
        // NOTE: inherit from listenio
        connio->accept_cb = io->accept_cb;
        connio->userdata = io->userdata;
        if (io->unpack_setting) {
            hio_set_unpack(connio, io->unpack_setting);
        }
        if (io->io_type == HIO_TYPE_SSL) {
            if (connio->ssl == NULL) {
                // SSL context priority: io->ssl_ctx > g_ssl_ctx > hssl_ctx_new
                hssl_ctx_t ssl_ctx = NULL;
                if (io->ssl_ctx) {
                    ssl_ctx = io->ssl_ctx;
                } else if (g_ssl_ctx) {
                    ssl_ctx = g_ssl_ctx;
                } else {
                    // Lazily create a default context; alloced_ssl_ctx marks
                    // it for release when the listener closes.
                    io->ssl_ctx = ssl_ctx = hssl_ctx_new(NULL);
                    io->alloced_ssl_ctx = 1;
                }
                if (ssl_ctx == NULL) {
                    io->error = ERR_NEW_SSL_CTX;
                    goto accept_error;
                }
                hssl_t ssl = hssl_new(ssl_ctx, connfd);
                if (ssl == NULL) {
                    io->error = ERR_NEW_SSL;
                    goto accept_error;
                }
                connio->ssl = ssl;
            }
            hio_enable_ssl(connio);
            ssl_server_handshake(connio);
        }
        else {
            // NOTE: SSL call accept_cb after handshake finished
            __accept_cb(connio);
        }
    }
    return;
accept_error:
    // NOTE(review): this closes the LISTENING io on SSL-setup failure, not
    // just the new connection — presumably intentional (fatal misconfig),
    // verify against callers.
    hloge("listenfd=%d accept error: %s:%d", io->fd, socket_strerror(io->error), io->error);
    hio_close(io);
}
// Completion handler for a non-blocking connect (invoked when the fd turns
// writable, or directly when connect() succeeded immediately).
// getpeername() doubles as the success probe: it fails if the connect did
// not actually complete. On success, SSL connections start the client
// handshake; plain connections fire connect_cb right away.
static void nio_connect(hio_t* io) {
    // printd("nio_connect connfd=%d\n", io->fd);
    socklen_t addrlen = sizeof(sockaddr_u);
    int ret = getpeername(io->fd, io->peeraddr, &addrlen);
    if (ret < 0) {
        io->error = socket_errno();
        goto connect_error;
    }
    else {
        addrlen = sizeof(sockaddr_u);
        getsockname(io->fd, io->localaddr, &addrlen);
        if (io->io_type == HIO_TYPE_SSL) {
            if (io->ssl == NULL) {
                // SSL context priority: io->ssl_ctx > g_ssl_ctx > hssl_ctx_new
                hssl_ctx_t ssl_ctx = NULL;
                if (io->ssl_ctx) {
                    ssl_ctx = io->ssl_ctx;
                } else if (g_ssl_ctx) {
                    ssl_ctx = g_ssl_ctx;
                } else {
                    // Lazily create a default context; alloced_ssl_ctx marks
                    // it for release when this io closes.
                    io->ssl_ctx = ssl_ctx = hssl_ctx_new(NULL);
                    io->alloced_ssl_ctx = 1;
                }
                if (ssl_ctx == NULL) {
                    io->error = ERR_NEW_SSL_CTX;
                    goto connect_error;
                }
                hssl_t ssl = hssl_new(ssl_ctx, io->fd);
                if (ssl == NULL) {
                    io->error = ERR_NEW_SSL;
                    goto connect_error;
                }
                io->ssl = ssl;
            }
            ssl_client_handshake(io);
        }
        else {
            // NOTE: SSL call connect_cb after handshake finished
            __connect_cb(io);
        }
        return;
    }
connect_error:
    hlogw("connfd=%d connect error: %s:%d\n", io->fd, socket_strerror(io->error), io->error);
    hio_close(io);
}
// Raw read dispatched by transport type. Returns whatever the underlying
// primitive returns: >0 bytes read, 0 on orderly peer shutdown, <0 on
// error (caller inspects socket_errno()).
static int __nio_read(hio_t* io, void* buf, int len) {
    int nread = 0;
    switch (io->io_type) {
    case HIO_TYPE_SSL:
        nread = hssl_read(io->ssl, buf, len);
        break;
    case HIO_TYPE_TCP:
#ifdef OS_UNIX
        nread = read(io->fd, buf, len);
#else
        nread = recv(io->fd, buf, len, 0);
#endif
        break;
    case HIO_TYPE_UDP:
    case HIO_TYPE_KCP:
    case HIO_TYPE_IP:
    {
        // Datagram read: also capture the sender's address into peeraddr.
        socklen_t addrlen = sizeof(sockaddr_u);
        nread = recvfrom(io->fd, buf, len, 0, io->peeraddr, &addrlen);
    }
        break;
    default:
        nread = read(io->fd, buf, len);
        break;
    }
    // hlogd("read retval=%d", nread);
    return nread;
}
// Raw write dispatched by transport type. Returns whatever the underlying
// primitive returns: >0 bytes written, 0 or <0 on error (caller inspects
// socket_errno() when <0).
static int __nio_write(hio_t* io, const void* buf, int len) {
    int nwrite = 0;
    switch (io->io_type) {
    case HIO_TYPE_SSL:
        nwrite = hssl_write(io->ssl, buf, len);
        break;
    case HIO_TYPE_TCP:
#ifdef OS_UNIX
        nwrite = write(io->fd, buf, len);
#else
        nwrite = send(io->fd, buf, len, 0);
#endif
        break;
    case HIO_TYPE_UDP:
    case HIO_TYPE_KCP:
    case HIO_TYPE_IP:
        // Datagram write: always addressed to the stored peeraddr.
        nwrite = sendto(io->fd, buf, len, 0, io->peeraddr, SOCKADDR_LEN(io->peeraddr));
        break;
    default:
        nwrite = write(io->fd, buf, len);
        break;
    }
    // hlogd("write retval=%d", nwrite);
    return nwrite;
}
  266. static void nio_read(hio_t* io) {
  267. // printd("nio_read fd=%d\n", io->fd);
  268. void* buf;
  269. int len = 0, nread = 0, err = 0;
  270. read:
  271. buf = io->readbuf.base + io->readbuf.tail;
  272. if (io->read_flags & HIO_READ_UNTIL_LENGTH) {
  273. len = io->read_until_length - (io->readbuf.tail - io->readbuf.head);
  274. } else {
  275. len = io->readbuf.len - io->readbuf.tail;
  276. }
  277. assert(len > 0);
  278. nread = __nio_read(io, buf, len);
  279. // printd("read retval=%d\n", nread);
  280. if (nread < 0) {
  281. err = socket_errno();
  282. if (err == EAGAIN) {
  283. // goto read_done;
  284. return;
  285. } else if (err == EMSGSIZE) {
  286. // ignore
  287. return;
  288. } else {
  289. // perror("read");
  290. io->error = err;
  291. goto read_error;
  292. }
  293. }
  294. if (nread == 0) {
  295. goto disconnect;
  296. }
  297. io->readbuf.tail += nread;
  298. __read_cb(io, buf, nread);
  299. if (nread == len && !io->closed) {
  300. // read continue
  301. goto read;
  302. }
  303. return;
  304. read_error:
  305. disconnect:
  306. hio_close(io);
  307. }
  308. static void nio_write(hio_t* io) {
  309. // printd("nio_write fd=%d\n", io->fd);
  310. int nwrite = 0, err = 0;
  311. hrecursive_mutex_lock(&io->write_mutex);
  312. write:
  313. if (write_queue_empty(&io->write_queue)) {
  314. hrecursive_mutex_unlock(&io->write_mutex);
  315. if (io->close) {
  316. io->close = 0;
  317. hio_close(io);
  318. }
  319. return;
  320. }
  321. offset_buf_t* pbuf = write_queue_front(&io->write_queue);
  322. char* base = pbuf->base;
  323. char* buf = base + pbuf->offset;
  324. int len = pbuf->len - pbuf->offset;
  325. nwrite = __nio_write(io, buf, len);
  326. // printd("write retval=%d\n", nwrite);
  327. if (nwrite < 0) {
  328. err = socket_errno();
  329. if (err == EAGAIN) {
  330. hrecursive_mutex_unlock(&io->write_mutex);
  331. return;
  332. } else {
  333. // perror("write");
  334. io->error = err;
  335. goto write_error;
  336. }
  337. }
  338. if (nwrite == 0) {
  339. goto disconnect;
  340. }
  341. pbuf->offset += nwrite;
  342. io->write_bufsize -= nwrite;
  343. __write_cb(io, buf, nwrite);
  344. if (nwrite == len) {
  345. // NOTE: after write_cb, pbuf maybe invalid.
  346. // HV_FREE(pbuf->base);
  347. HV_FREE(base);
  348. write_queue_pop_front(&io->write_queue);
  349. if (!io->closed) {
  350. // write continue
  351. goto write;
  352. }
  353. }
  354. hrecursive_mutex_unlock(&io->write_mutex);
  355. return;
  356. write_error:
  357. disconnect:
  358. hrecursive_mutex_unlock(&io->write_mutex);
  359. hio_close(io);
  360. }
// Dispatch the readiness events (io->revents) reported by the iowatcher
// to the accept/read/connect/write paths, then clear revents.
static void hio_handle_events(hio_t* io) {
    if ((io->events & HV_READ) && (io->revents & HV_READ)) {
        if (io->accept) {
            nio_accept(io);
        }
        else {
            nio_read(io);
        }
    }
    if ((io->events & HV_WRITE) && (io->revents & HV_WRITE)) {
        // NOTE: del HV_WRITE, if write_queue empty
        // (writability is only interesting while data is queued; keeping
        // the watch would make the loop spin on every iteration)
        hrecursive_mutex_lock(&io->write_mutex);
        if (write_queue_empty(&io->write_queue)) {
            iowatcher_del_event(io->loop, io->fd, HV_WRITE);
            io->events &= ~HV_WRITE;
        }
        hrecursive_mutex_unlock(&io->write_mutex);
        if (io->connect) {
            // NOTE: connect just do once
            // ONESHOT
            io->connect = 0;
            nio_connect(io);
        }
        else {
            nio_write(io);
        }
    }
    io->revents = 0;
}
// Put the io into listening mode: from now on, readability on the fd
// means pending connections, dispatched through nio_accept.
// Returns the result of hio_add (0 on success).
int hio_accept(hio_t* io) {
    io->accept = 1;
    return hio_add(io, hio_handle_events, HV_READ);
}
// Start a non-blocking connect to io->peeraddr. Immediate success runs the
// completion path synchronously; an in-progress connect arms the connect
// timer and waits for writability (handled by hio_handle_events).
// Returns 0 on success/in-progress registration, <0 on immediate failure.
int hio_connect(hio_t* io) {
    int ret = connect(io->fd, io->peeraddr, SOCKADDR_LEN(io->peeraddr));
#ifdef OS_WIN
    if (ret < 0 && socket_errno() != WSAEWOULDBLOCK) {
#else
    if (ret < 0 && socket_errno() != EINPROGRESS) {
#endif
        // Hard failure (not the expected "in progress" state).
        perror("connect");
        io->error = socket_errno();
        hio_close(io);
        return ret;
    }
    if (ret == 0) {
        // connect ok
        nio_connect(io);
        return 0;
    }
    // In progress: bound the wait with a one-shot connect timer.
    int timeout = io->connect_timeout ? io->connect_timeout : HIO_DEFAULT_CONNECT_TIMEOUT;
    io->connect_timer = htimer_add(io->loop, __connect_timeout_cb, timeout, 1);
    io->connect_timer->privdata = io;
    io->connect = 1;
    return hio_add(io, hio_handle_events, HV_WRITE);
}
  417. int hio_read (hio_t* io) {
  418. if (io->closed) {
  419. hloge("hio_read called but fd[%d] already closed!", io->fd);
  420. return -1;
  421. }
  422. hio_add(io, hio_handle_events, HV_READ);
  423. if (io->readbuf.tail > io->readbuf.head &&
  424. io->unpack_setting == NULL &&
  425. io->read_flags == 0) {
  426. hio_read_remain(io);
  427. }
  428. return 0;
  429. }
  430. int hio_write (hio_t* io, const void* buf, size_t len) {
  431. if (io->closed) {
  432. hloge("hio_write called but fd[%d] already closed!", io->fd);
  433. return -1;
  434. }
  435. int nwrite = 0, err = 0;
  436. hrecursive_mutex_lock(&io->write_mutex);
  437. #if WITH_KCP
  438. if (io->io_type == HIO_TYPE_KCP) {
  439. nwrite = hio_write_kcp(io, buf, len);
  440. // if (nwrite < 0) goto write_error;
  441. goto write_done;
  442. }
  443. #endif
  444. if (write_queue_empty(&io->write_queue)) {
  445. try_write:
  446. nwrite = __nio_write(io, buf, len);
  447. // printd("write retval=%d\n", nwrite);
  448. if (nwrite < 0) {
  449. err = socket_errno();
  450. if (err == EAGAIN) {
  451. nwrite = 0;
  452. hlogw("try_write failed, enqueue!");
  453. goto enqueue;
  454. } else {
  455. // perror("write");
  456. io->error = err;
  457. goto write_error;
  458. }
  459. }
  460. if (nwrite == 0) {
  461. goto disconnect;
  462. }
  463. if (nwrite == len) {
  464. goto write_done;
  465. }
  466. enqueue:
  467. hio_add(io, hio_handle_events, HV_WRITE);
  468. }
  469. if (nwrite < len) {
  470. if (io->write_bufsize + len - nwrite > MAX_WRITE_BUFSIZE) {
  471. if (io->write_bufsize > MAX_WRITE_BUFSIZE) {
  472. hloge("write bufsize > %u, close it!", (unsigned int)MAX_WRITE_BUFSIZE);
  473. io->error = ERR_OVER_LIMIT;
  474. goto write_error;
  475. }
  476. }
  477. offset_buf_t remain;
  478. remain.len = len - nwrite;
  479. remain.offset = 0;
  480. // NOTE: free in nio_write
  481. HV_ALLOC(remain.base, remain.len);
  482. memcpy(remain.base, ((char*)buf) + nwrite, remain.len);
  483. if (io->write_queue.maxsize == 0) {
  484. write_queue_init(&io->write_queue, 4);
  485. }
  486. write_queue_push_back(&io->write_queue, &remain);
  487. io->write_bufsize += remain.len;
  488. if (io->write_bufsize > WRITE_BUFSIZE_HIGH_WATER) {
  489. hlogw("write len=%d enqueue %u, bufsize=%u over high water %u",
  490. len, (unsigned int)(remain.len - remain.offset),
  491. (unsigned int)io->write_bufsize,
  492. (unsigned int)WRITE_BUFSIZE_HIGH_WATER);
  493. }
  494. }
  495. write_done:
  496. hrecursive_mutex_unlock(&io->write_mutex);
  497. if (nwrite > 0) {
  498. __write_cb(io, buf, nwrite);
  499. }
  500. return nwrite;
  501. write_error:
  502. disconnect:
  503. hrecursive_mutex_unlock(&io->write_mutex);
  504. /* NOTE:
  505. * We usually free resources in hclose_cb,
  506. * if hio_close_sync, we have to be very careful to avoid using freed resources.
  507. * But if hio_close_async, we do not have to worry about this.
  508. */
  509. hio_close_async(io);
  510. return nwrite < 0 ? nwrite : -1;
  511. }
// Close an io. Must run on the loop thread (otherwise deferred via
// hio_close_async). If outgoing data is still queued and no error is
// pending, the close is postponed until the queue drains, bounded by a
// one-shot close timer; otherwise resources are released immediately.
// Returns 0.
int hio_close (hio_t* io) {
    if (io->closed) return 0;
    if (hv_gettid() != io->loop->tid) {
        // Wrong thread: hand the close over to the loop thread.
        return hio_close_async(io);
    }
    hrecursive_mutex_lock(&io->write_mutex);
    // Re-check under the lock: another path may have closed it meanwhile.
    if (io->closed) {
        hrecursive_mutex_unlock(&io->write_mutex);
        return 0;
    }
    if (!write_queue_empty(&io->write_queue) && io->error == 0 && io->close == 0) {
        // Defer: nio_write re-invokes hio_close once the queue drains;
        // the close timer puts an upper bound on the wait.
        io->close = 1;
        hrecursive_mutex_unlock(&io->write_mutex);
        hlogw("write_queue not empty, close later.");
        int timeout_ms = io->close_timeout ? io->close_timeout : HIO_DEFAULT_CLOSE_TIMEOUT;
        io->close_timer = htimer_add(io->loop, __close_timeout_cb, timeout_ms, 1);
        io->close_timer->privdata = io;
        return 0;
    }
    io->closed = 1;
    hrecursive_mutex_unlock(&io->write_mutex);
    hio_done(io);
    __close_cb(io);
    // Release SSL state; the context is freed only if we allocated it.
    if (io->ssl) {
        hssl_free(io->ssl);
        io->ssl = NULL;
    }
    if (io->ssl_ctx && io->alloced_ssl_ctx) {
        hssl_ctx_free(io->ssl_ctx);
        io->ssl_ctx = NULL;
    }
    if (io->io_type & HIO_TYPE_SOCKET) {
        closesocket(io->fd);
    }
    return 0;
}
  548. #endif