nio.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569
  1. #include "iowatcher.h"
  2. #ifndef EVENT_IOCP
  3. #include "hevent.h"
  4. #include "hsocket.h"
  5. #include "hssl.h"
  6. #include "hlog.h"
  7. #include "hthread.h"
  8. static void __connect_timeout_cb(htimer_t* timer) {
  9. hio_t* io = (hio_t*)timer->privdata;
  10. if (io) {
  11. char localaddrstr[SOCKADDR_STRLEN] = {0};
  12. char peeraddrstr[SOCKADDR_STRLEN] = {0};
  13. hlogw("connect timeout [%s] <=> [%s]",
  14. SOCKADDR_STR(io->localaddr, localaddrstr),
  15. SOCKADDR_STR(io->peeraddr, peeraddrstr));
  16. io->error = ETIMEDOUT;
  17. hio_close(io);
  18. }
  19. }
  20. static void __close_timeout_cb(htimer_t* timer) {
  21. hio_t* io = (hio_t*)timer->privdata;
  22. if (io) {
  23. char localaddrstr[SOCKADDR_STRLEN] = {0};
  24. char peeraddrstr[SOCKADDR_STRLEN] = {0};
  25. hlogw("close timeout [%s] <=> [%s]",
  26. SOCKADDR_STR(io->localaddr, localaddrstr),
  27. SOCKADDR_STR(io->peeraddr, peeraddrstr));
  28. io->error = ETIMEDOUT;
  29. hio_close(io);
  30. }
  31. }
// Thin wrapper invoking the user-level accept callback for a newly
// accepted connection io.
static void __accept_cb(hio_t* io) {
    hio_accept_cb(io);
}
// Connect completed: cancel the pending connect-timeout timer first, then
// invoke the user-level connect callback.
static void __connect_cb(hio_t* io) {
    hio_del_connect_timer(io);
    hio_connect_cb(io);
}
// Data arrived: stamp last_read_hrtime (used by keepalive/heartbeat
// bookkeeping elsewhere — TODO confirm) and hand the bytes to the
// unpack/read pipeline.
static void __read_cb(hio_t* io, void* buf, int readbytes) {
    // printd("> %.*s\n", readbytes, buf);
    io->last_read_hrtime = io->loop->cur_hrtime;
    hio_handle_read(io, buf, readbytes);
}
// Bytes were written to the fd: stamp last_write_hrtime, then invoke the
// user-level write callback with exactly what was sent this round.
static void __write_cb(hio_t* io, const void* buf, int writebytes) {
    // printd("< %.*s\n", writebytes, buf);
    io->last_write_hrtime = io->loop->cur_hrtime;
    hio_write_cb(io, buf, writebytes);
}
// Final close notification: cancel every per-io timer so none of them can
// fire against a dead io, then invoke the user-level close callback.
static void __close_cb(hio_t* io) {
    // printd("close fd=%d\n", io->fd);
    hio_del_connect_timer(io);
    hio_del_close_timer(io);
    hio_del_read_timer(io);
    hio_del_write_timer(io);
    hio_del_keepalive_timer(io);
    hio_del_heartbeat_timer(io);
    hio_close_cb(io);
}
  59. static void ssl_server_handshake(hio_t* io) {
  60. printd("ssl server handshake...\n");
  61. int ret = hssl_accept(io->ssl);
  62. if (ret == 0) {
  63. // handshake finish
  64. iowatcher_del_event(io->loop, io->fd, HV_READ);
  65. io->events &= ~HV_READ;
  66. io->cb = NULL;
  67. printd("ssl handshake finished.\n");
  68. __accept_cb(io);
  69. }
  70. else if (ret == HSSL_WANT_READ) {
  71. if ((io->events & HV_READ) == 0) {
  72. hio_add(io, ssl_server_handshake, HV_READ);
  73. }
  74. }
  75. else {
  76. hloge("ssl handshake failed: %d", ret);
  77. hio_close(io);
  78. }
  79. }
  80. static void ssl_client_handshake(hio_t* io) {
  81. printd("ssl client handshake...\n");
  82. int ret = hssl_connect(io->ssl);
  83. if (ret == 0) {
  84. // handshake finish
  85. iowatcher_del_event(io->loop, io->fd, HV_READ);
  86. io->events &= ~HV_READ;
  87. io->cb = NULL;
  88. printd("ssl handshake finished.\n");
  89. __connect_cb(io);
  90. }
  91. else if (ret == HSSL_WANT_READ) {
  92. if ((io->events & HV_READ) == 0) {
  93. hio_add(io, ssl_client_handshake, HV_READ);
  94. }
  95. }
  96. else {
  97. hloge("ssl handshake failed: %d", ret);
  98. hio_close(io);
  99. }
  100. }
// Readable event on a listening fd: accept pending connections.
// At most 3 accepts per wakeup, so a busy listener cannot starve other
// ios on the same loop iteration.
static void nio_accept(hio_t* io) {
    // printd("nio_accept listenfd=%d\n", io->fd);
    int connfd = 0, err = 0, accept_cnt = 0;
    socklen_t addrlen;
    hio_t* connio = NULL;
    while (accept_cnt++ < 3) {
        addrlen = sizeof(sockaddr_u);
        connfd = accept(io->fd, io->peeraddr, &addrlen);
        if (connfd < 0) {
            err = socket_errno();
            // EAGAIN: backlog drained; EINTR: give up for this wakeup and
            // let the next readable event retry.
            if (err == EAGAIN || err == EINTR) {
                return;
            } else {
                perror("accept");
                io->error = err;
                goto accept_error;
            }
        }
        addrlen = sizeof(sockaddr_u);
        getsockname(connfd, io->localaddr, &addrlen);
        connio = hio_get(io->loop, connfd);
        // NOTE: inherit from listenio
        connio->accept_cb = io->accept_cb;
        connio->userdata = io->userdata;
        if (io->unpack_setting) {
            hio_set_unpack(connio, io->unpack_setting);
        }
        if (io->io_type == HIO_TYPE_SSL) {
            if (connio->ssl == NULL) {
                // SSL context fallback chain:
                // io->ssl_ctx > g_ssl_ctx > hssl_ctx_new
                hssl_ctx_t ssl_ctx = NULL;
                if (io->ssl_ctx) {
                    ssl_ctx = io->ssl_ctx;
                } else if (g_ssl_ctx) {
                    ssl_ctx = g_ssl_ctx;
                } else {
                    // Lazily allocated ctx is owned by the listen io
                    // (alloced_ssl_ctx marks it for cleanup in hio_close).
                    io->ssl_ctx = ssl_ctx = hssl_ctx_new(NULL);
                    io->alloced_ssl_ctx = 1;
                }
                if (ssl_ctx == NULL) {
                    io->error = HSSL_ERROR;
                    goto accept_error;
                }
                hssl_t ssl = hssl_new(ssl_ctx, connfd);
                if (ssl == NULL) {
                    io->error = HSSL_ERROR;
                    goto accept_error;
                }
                connio->ssl = ssl;
            }
            hio_enable_ssl(connio);
            ssl_server_handshake(connio);
        }
        else {
            // NOTE: SSL call accept_cb after handshake finished
            __accept_cb(connio);
        }
    }
    return;
accept_error:
    // Fatal accept/SSL-setup failure: close the LISTENING io itself.
    hloge("listenfd=%d accept error: %s:%d", io->fd, socket_strerror(io->error), io->error);
    hio_close(io);
}
// Connect result handler: invoked when a non-blocking connect() resolves
// (writable event, or immediately on synchronous success). getpeername()
// doubles as the success/failure probe: it fails if the connect failed.
static void nio_connect(hio_t* io) {
    // printd("nio_connect connfd=%d\n", io->fd);
    socklen_t addrlen = sizeof(sockaddr_u);
    int ret = getpeername(io->fd, io->peeraddr, &addrlen);
    if (ret < 0) {
        io->error = socket_errno();
        goto connect_error;
    }
    else {
        addrlen = sizeof(sockaddr_u);
        getsockname(io->fd, io->localaddr, &addrlen);
        if (io->io_type == HIO_TYPE_SSL) {
            if (io->ssl == NULL) {
                // SSL context fallback chain:
                // io->ssl_ctx > g_ssl_ctx > hssl_ctx_new
                hssl_ctx_t ssl_ctx = NULL;
                if (io->ssl_ctx) {
                    ssl_ctx = io->ssl_ctx;
                } else if (g_ssl_ctx) {
                    ssl_ctx = g_ssl_ctx;
                } else {
                    // Lazily allocated ctx is owned by this io.
                    io->ssl_ctx = ssl_ctx = hssl_ctx_new(NULL);
                    io->alloced_ssl_ctx = 1;
                }
                if (ssl_ctx == NULL) {
                    goto connect_error;
                }
                hssl_t ssl = hssl_new(ssl_ctx, io->fd);
                if (ssl == NULL) {
                    goto connect_error;
                }
                io->ssl = ssl;
            }
            ssl_client_handshake(io);
        }
        else {
            // NOTE: SSL call connect_cb after handshake finished
            __connect_cb(io);
        }
        return;
    }
connect_error:
    hlogw("connfd=%d connect error: %s:%d\n", io->fd, socket_strerror(io->error), io->error);
    hio_close(io);
}
  208. static int __nio_read(hio_t* io, void* buf, int len) {
  209. int nread = 0;
  210. switch (io->io_type) {
  211. case HIO_TYPE_SSL:
  212. nread = hssl_read(io->ssl, buf, len);
  213. break;
  214. case HIO_TYPE_TCP:
  215. #ifdef OS_UNIX
  216. nread = read(io->fd, buf, len);
  217. #else
  218. nread = recv(io->fd, buf, len, 0);
  219. #endif
  220. break;
  221. case HIO_TYPE_UDP:
  222. case HIO_TYPE_KCP:
  223. case HIO_TYPE_IP:
  224. {
  225. socklen_t addrlen = sizeof(sockaddr_u);
  226. nread = recvfrom(io->fd, buf, len, 0, io->peeraddr, &addrlen);
  227. }
  228. break;
  229. default:
  230. nread = read(io->fd, buf, len);
  231. break;
  232. }
  233. // hlogd("read retval=%d", nread);
  234. return nread;
  235. }
  236. static int __nio_write(hio_t* io, const void* buf, int len) {
  237. int nwrite = 0;
  238. switch (io->io_type) {
  239. case HIO_TYPE_SSL:
  240. nwrite = hssl_write(io->ssl, buf, len);
  241. break;
  242. case HIO_TYPE_TCP:
  243. #ifdef OS_UNIX
  244. nwrite = write(io->fd, buf, len);
  245. #else
  246. nwrite = send(io->fd, buf, len, 0);
  247. #endif
  248. break;
  249. case HIO_TYPE_UDP:
  250. case HIO_TYPE_KCP:
  251. case HIO_TYPE_IP:
  252. nwrite = sendto(io->fd, buf, len, 0, io->peeraddr, SOCKADDR_LEN(io->peeraddr));
  253. break;
  254. default:
  255. nwrite = write(io->fd, buf, len);
  256. break;
  257. }
  258. // hlogd("write retval=%d", nwrite);
  259. return nwrite;
  260. }
// Readable event on a connected io: read once into readbuf and deliver
// the bytes. readbuf is an offset buffer: [head, tail) holds unconsumed
// data, [tail, len) is free space.
static void nio_read(hio_t* io) {
    // printd("nio_read fd=%d\n", io->fd);
    void* buf;
    int len = 0, nread = 0, err = 0;
read:
    buf = io->readbuf.base + io->readbuf.tail;
    if (io->read_flags & HIO_READ_UNTIL_LENGTH) {
        // Only request the bytes still missing to reach read_until_length.
        len = io->read_until_length - (io->readbuf.tail - io->readbuf.head);
    } else {
        len = io->readbuf.len - io->readbuf.tail;
    }
    assert(len > 0);
    nread = __nio_read(io, buf, len);
    // printd("read retval=%d\n", nread);
    if (nread < 0) {
        err = socket_errno();
        if (err == EAGAIN) {
            // Nothing available; wait for the next readable event.
            // goto read_done;
            return;
        } else if (err == EMSGSIZE) {
            // Datagram larger than buf was truncated by the kernel;
            // deliberately ignored.
            // ignore
            return;
        } else {
            // perror("read");
            io->error = err;
            goto read_error;
        }
    }
    if (nread == 0) {
        // Orderly EOF from the peer.
        goto disconnect;
    }
    io->readbuf.tail += nread;
    __read_cb(io, buf, nread);
    // if (nread == len) goto read;
    return;
read_error:
disconnect:
    hio_close(io);
}
// Writable event: flush the front of write_queue to the kernel.
// write_mutex serializes against hio_write(), which may run on other
// threads; every exit path must release it.
static void nio_write(hio_t* io) {
    // printd("nio_write fd=%d\n", io->fd);
    int nwrite = 0, err = 0;
    hrecursive_mutex_lock(&io->write_mutex);
write:
    if (write_queue_empty(&io->write_queue)) {
        hrecursive_mutex_unlock(&io->write_mutex);
        // io->close was set by hio_close() while data was still queued;
        // the queue has drained, so complete the deferred close now.
        if (io->close) {
            io->close = 0;
            hio_close(io);
        }
        return;
    }
    offset_buf_t* pbuf = write_queue_front(&io->write_queue);
    char* base = pbuf->base;
    char* buf = base + pbuf->offset;
    int len = pbuf->len - pbuf->offset;
    nwrite = __nio_write(io, buf, len);
    // printd("write retval=%d\n", nwrite);
    if (nwrite < 0) {
        err = socket_errno();
        if (err == EAGAIN) {
            // Kernel send buffer full again; wait for next writable event.
            hrecursive_mutex_unlock(&io->write_mutex);
            return;
        } else {
            // perror("write");
            io->error = err;
            goto write_error;
        }
    }
    if (nwrite == 0) {
        goto disconnect;
    }
    pbuf->offset += nwrite;
    io->write_bufsize -= nwrite;
    __write_cb(io, buf, nwrite);
    if (nwrite == len) {
        // Front buffer fully flushed; free it and try the next one.
        // NOTE: after write_cb, pbuf maybe invalid.
        // HV_FREE(pbuf->base);
        HV_FREE(base);
        write_queue_pop_front(&io->write_queue);
        if (!io->closed) {
            // write continue
            goto write;
        }
    }
    hrecursive_mutex_unlock(&io->write_mutex);
    return;
write_error:
disconnect:
    hrecursive_mutex_unlock(&io->write_mutex);
    hio_close(io);
}
// Central event dispatcher: route the poller's revents to the accept /
// read / connect / write paths, then clear revents for the next cycle.
static void hio_handle_events(hio_t* io) {
    if ((io->events & HV_READ) && (io->revents & HV_READ)) {
        if (io->accept) {
            nio_accept(io);
        }
        else {
            nio_read(io);
        }
    }
    if ((io->events & HV_WRITE) && (io->revents & HV_WRITE)) {
        // NOTE: del HV_WRITE, if write_queue empty
        // (checked under the lock so a concurrent hio_write enqueue
        // cannot race the un-registration).
        hrecursive_mutex_lock(&io->write_mutex);
        if (write_queue_empty(&io->write_queue)) {
            iowatcher_del_event(io->loop, io->fd, HV_WRITE);
            io->events &= ~HV_WRITE;
        }
        hrecursive_mutex_unlock(&io->write_mutex);
        if (io->connect) {
            // NOTE: connect just do once
            // ONESHOT
            io->connect = 0;
            nio_connect(io);
        }
        else {
            nio_write(io);
        }
    }
    io->revents = 0;
}
// Start accepting on a listening io: mark it as an acceptor and register
// a read watcher whose events are dispatched by hio_handle_events().
int hio_accept(hio_t* io) {
    io->accept = 1;
    return hio_add(io, hio_handle_events, HV_READ);
}
// Start a non-blocking connect to io->peeraddr.
// Synchronous success completes immediately via nio_connect(); otherwise
// a one-shot connect-timeout timer is armed and completion is detected by
// a writable event. Returns 0 or hio_add()'s result on the async path,
// negative on immediate failure (io closed).
int hio_connect(hio_t* io) {
    int ret = connect(io->fd, io->peeraddr, SOCKADDR_LEN(io->peeraddr));
#ifdef OS_WIN
    // Windows reports a pending non-blocking connect as WSAEWOULDBLOCK.
    if (ret < 0 && socket_errno() != WSAEWOULDBLOCK) {
#else
    // POSIX reports a pending non-blocking connect as EINPROGRESS.
    if (ret < 0 && socket_errno() != EINPROGRESS) {
#endif
        perror("connect");
        hio_close(io);
        return ret;
    }
    if (ret == 0) {
        // connect ok
        nio_connect(io);
        return 0;
    }
    int timeout = io->connect_timeout ? io->connect_timeout : HIO_DEFAULT_CONNECT_TIMEOUT;
    io->connect_timer = htimer_add(io->loop, __connect_timeout_cb, timeout, 1);
    io->connect_timer->privdata = io;
    io->connect = 1;
    return hio_add(io, hio_handle_events, HV_WRITE);
}
// Enable reading on the io: register the read watcher and, when plain
// buffered data is already sitting in readbuf (no unpack setting and no
// special read flags), deliver it immediately via hio_read_remain().
// Returns 0 on success, -1 if the io is already closed.
int hio_read (hio_t* io) {
    if (io->closed) {
        hloge("hio_read called but fd[%d] already closed!", io->fd);
        return -1;
    }
    hio_add(io, hio_handle_events, HV_READ);
    if (io->readbuf.tail > io->readbuf.head &&
        io->unpack_setting == NULL &&
        io->read_flags == 0) {
        hio_read_remain(io);
    }
    return 0;
}
  421. int hio_write (hio_t* io, const void* buf, size_t len) {
  422. if (io->closed) {
  423. hloge("hio_write called but fd[%d] already closed!", io->fd);
  424. return -1;
  425. }
  426. int nwrite = 0, err = 0;
  427. hrecursive_mutex_lock(&io->write_mutex);
  428. #if WITH_KCP
  429. if (io->io_type == HIO_TYPE_KCP) {
  430. nwrite = hio_write_kcp(io, buf, len);
  431. // if (nwrite < 0) goto write_error;
  432. goto write_done;
  433. }
  434. #endif
  435. if (write_queue_empty(&io->write_queue)) {
  436. try_write:
  437. nwrite = __nio_write(io, buf, len);
  438. // printd("write retval=%d\n", nwrite);
  439. if (nwrite < 0) {
  440. err = socket_errno();
  441. if (err == EAGAIN) {
  442. nwrite = 0;
  443. hlogw("try_write failed, enqueue!");
  444. goto enqueue;
  445. } else {
  446. // perror("write");
  447. io->error = err;
  448. goto write_error;
  449. }
  450. }
  451. if (nwrite == 0) {
  452. goto disconnect;
  453. }
  454. if (nwrite == len) {
  455. goto write_done;
  456. }
  457. enqueue:
  458. hio_add(io, hio_handle_events, HV_WRITE);
  459. }
  460. if (nwrite < len) {
  461. if (io->write_bufsize + len - nwrite > MAX_WRITE_BUFSIZE) {
  462. if (io->write_bufsize > MAX_WRITE_BUFSIZE) {
  463. hloge("write bufsize > %u, close it!", (unsigned int)MAX_WRITE_BUFSIZE);
  464. goto write_error;
  465. }
  466. }
  467. offset_buf_t remain;
  468. remain.len = len - nwrite;
  469. remain.offset = 0;
  470. // NOTE: free in nio_write
  471. HV_ALLOC(remain.base, remain.len);
  472. memcpy(remain.base, ((char*)buf) + nwrite, remain.len);
  473. if (io->write_queue.maxsize == 0) {
  474. write_queue_init(&io->write_queue, 4);
  475. }
  476. write_queue_push_back(&io->write_queue, &remain);
  477. io->write_bufsize += remain.len;
  478. if (io->write_bufsize > WRITE_BUFSIZE_HIGH_WATER) {
  479. hlogw("write len=%d enqueue %u, bufsize=%u over high water %u",
  480. len, (unsigned int)(remain.len - remain.offset),
  481. (unsigned int)io->write_bufsize,
  482. (unsigned int)WRITE_BUFSIZE_HIGH_WATER);
  483. }
  484. }
  485. write_done:
  486. hrecursive_mutex_unlock(&io->write_mutex);
  487. if (nwrite > 0) {
  488. __write_cb(io, buf, nwrite);
  489. }
  490. return nwrite;
  491. write_error:
  492. disconnect:
  493. hrecursive_mutex_unlock(&io->write_mutex);
  494. /* NOTE:
  495. * We usually free resources in hclose_cb,
  496. * if hio_close_sync, we have to be very careful to avoid using freed resources.
  497. * But if hio_close_async, we do not have to worry about this.
  498. */
  499. hio_close_async(io);
  500. return nwrite < 0 ? nwrite : -1;
  501. }
// Close the io. Cross-thread calls are forwarded to the loop thread via
// hio_close_async(). If unflushed data remains queued (and no error is
// pending), the close is deferred until nio_write() drains the queue,
// bounded by a one-shot close-timeout timer. Always returns 0 (or the
// result of hio_close_async on the cross-thread path).
int hio_close (hio_t* io) {
    if (io->closed) return 0;
    // Teardown must run on the owning loop thread.
    if (hv_gettid() != io->loop->tid) {
        return hio_close_async(io);
    }
    hrecursive_mutex_lock(&io->write_mutex);
    // Re-check under the lock: another path may have closed meanwhile.
    if (io->closed) {
        hrecursive_mutex_unlock(&io->write_mutex);
        return 0;
    }
    if (!write_queue_empty(&io->write_queue) && io->error == 0 && io->close == 0) {
        // Defer: nio_write() will call hio_close() again once drained;
        // the timer guarantees the close eventually happens.
        io->close = 1;
        hrecursive_mutex_unlock(&io->write_mutex);
        hlogw("write_queue not empty, close later.");
        int timeout_ms = io->close_timeout ? io->close_timeout : HIO_DEFAULT_CLOSE_TIMEOUT;
        io->close_timer = htimer_add(io->loop, __close_timeout_cb, timeout_ms, 1);
        io->close_timer->privdata = io;
        return 0;
    }
    io->closed = 1;
    hrecursive_mutex_unlock(&io->write_mutex);
    hio_done(io);
    __close_cb(io);
    if (io->ssl) {
        hssl_free(io->ssl);
        io->ssl = NULL;
    }
    // Only free the ctx if this io allocated it (see nio_accept/connect).
    if (io->ssl_ctx && io->alloced_ssl_ctx) {
        hssl_ctx_free(io->ssl_ctx);
        io->ssl_ctx = NULL;
    }
    if (io->io_type & HIO_TYPE_SOCKET) {
        closesocket(io->fd);
    }
    return 0;
}
  538. #endif