nio.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634
  1. #include "iowatcher.h"
  2. #ifndef EVENT_IOCP
  3. #include "hevent.h"
  4. #include "hsocket.h"
  5. #include "hssl.h"
  6. #include "hlog.h"
  7. #include "herr.h"
  8. #include "hthread.h"
  9. static void __connect_timeout_cb(htimer_t* timer) {
  10. hio_t* io = (hio_t*)timer->privdata;
  11. if (io) {
  12. char localaddrstr[SOCKADDR_STRLEN] = {0};
  13. char peeraddrstr[SOCKADDR_STRLEN] = {0};
  14. hlogw("connect timeout [%s] <=> [%s]",
  15. SOCKADDR_STR(io->localaddr, localaddrstr),
  16. SOCKADDR_STR(io->peeraddr, peeraddrstr));
  17. io->error = ETIMEDOUT;
  18. hio_close(io);
  19. }
  20. }
  21. static void __close_timeout_cb(htimer_t* timer) {
  22. hio_t* io = (hio_t*)timer->privdata;
  23. if (io) {
  24. char localaddrstr[SOCKADDR_STRLEN] = {0};
  25. char peeraddrstr[SOCKADDR_STRLEN] = {0};
  26. hlogw("close timeout [%s] <=> [%s]",
  27. SOCKADDR_STR(io->localaddr, localaddrstr),
  28. SOCKADDR_STR(io->peeraddr, peeraddrstr));
  29. io->error = ETIMEDOUT;
  30. hio_close(io);
  31. }
  32. }
  33. static void __accept_cb(hio_t* io) {
  34. hio_accept_cb(io);
  35. }
  36. static void __connect_cb(hio_t* io) {
  37. hio_del_connect_timer(io);
  38. hio_connect_cb(io);
  39. }
  40. static void __read_cb(hio_t* io, void* buf, int readbytes) {
  41. // printd("> %.*s\n", readbytes, buf);
  42. io->last_read_hrtime = io->loop->cur_hrtime;
  43. hio_handle_read(io, buf, readbytes);
  44. }
  45. static void __write_cb(hio_t* io, const void* buf, int writebytes) {
  46. // printd("< %.*s\n", writebytes, buf);
  47. io->last_write_hrtime = io->loop->cur_hrtime;
  48. hio_write_cb(io, buf, writebytes);
  49. }
  50. static void __close_cb(hio_t* io) {
  51. // printd("close fd=%d\n", io->fd);
  52. hio_del_connect_timer(io);
  53. hio_del_close_timer(io);
  54. hio_del_read_timer(io);
  55. hio_del_write_timer(io);
  56. hio_del_keepalive_timer(io);
  57. hio_del_heartbeat_timer(io);
  58. hio_close_cb(io);
  59. }
  60. static void ssl_server_handshake(hio_t* io) {
  61. printd("ssl server handshake...\n");
  62. int ret = hssl_accept(io->ssl);
  63. if (ret == 0) {
  64. // handshake finish
  65. hio_del(io, HV_READ);
  66. printd("ssl handshake finished.\n");
  67. __accept_cb(io);
  68. }
  69. else if (ret == HSSL_WANT_READ) {
  70. if ((io->events & HV_READ) == 0) {
  71. hio_add(io, ssl_server_handshake, HV_READ);
  72. }
  73. }
  74. else {
  75. hloge("ssl handshake failed: %d", ret);
  76. io->error = ERR_SSL_HANDSHAKE;
  77. hio_close(io);
  78. }
  79. }
  80. static void ssl_client_handshake(hio_t* io) {
  81. printd("ssl client handshake...\n");
  82. int ret = hssl_connect(io->ssl);
  83. if (ret == 0) {
  84. // handshake finish
  85. hio_del(io, HV_READ);
  86. printd("ssl handshake finished.\n");
  87. __connect_cb(io);
  88. }
  89. else if (ret == HSSL_WANT_READ) {
  90. if ((io->events & HV_READ) == 0) {
  91. hio_add(io, ssl_client_handshake, HV_READ);
  92. }
  93. }
  94. else {
  95. hloge("ssl handshake failed: %d", ret);
  96. io->error = ERR_SSL_HANDSHAKE;
  97. hio_close(io);
  98. }
  99. }
// Readiness handler for a listening fd. Drains up to 3 pending
// connections per event; each new connio inherits accept_cb/userdata/
// unpack settings from the listener. For SSL listeners the server-side
// handshake is started and the user accept_cb fires only after it
// finishes. The listen fd itself is never closed on error.
static void nio_accept(hio_t* io) {
    // printd("nio_accept listenfd=%d\n", io->fd);
    int connfd = 0, err = 0, accept_cnt = 0;
    socklen_t addrlen;
    hio_t* connio = NULL;
    // Cap accepts per readiness event so one busy listener cannot
    // monopolize the loop iteration.
    while (accept_cnt++ < 3) {
        addrlen = sizeof(sockaddr_u);
        connfd = accept(io->fd, io->peeraddr, &addrlen);
        if (connfd < 0) {
            err = socket_errno();
            if (err == EAGAIN || err == EINTR) {
                // Backlog drained (or call interrupted): wait for the next event.
                return;
            } else {
                perror("accept");
                io->error = err;
                goto accept_error;
            }
        }
        addrlen = sizeof(sockaddr_u);
        getsockname(connfd, io->localaddr, &addrlen);
        connio = hio_get(io->loop, connfd);
        // NOTE: inherit from listenio
        connio->accept_cb = io->accept_cb;
        connio->userdata = io->userdata;
        if (io->unpack_setting) {
            hio_set_unpack(connio, io->unpack_setting);
        }
        if (io->io_type == HIO_TYPE_SSL) {
            if (connio->ssl == NULL) {
                // SSL context priority: io->ssl_ctx > g_ssl_ctx > hssl_ctx_new
                hssl_ctx_t ssl_ctx = NULL;
                if (io->ssl_ctx) {
                    ssl_ctx = io->ssl_ctx;
                } else if (g_ssl_ctx) {
                    ssl_ctx = g_ssl_ctx;
                } else {
                    // Lazily create a default context; flag it so
                    // hio_close knows this module owns (and frees) it.
                    io->ssl_ctx = ssl_ctx = hssl_ctx_new(NULL);
                    io->alloced_ssl_ctx = 1;
                }
                if (ssl_ctx == NULL) {
                    // NOTE(review): connfd/connio appear to be leaked on this
                    // path — goto accept_error only logs; verify upstream.
                    io->error = ERR_NEW_SSL_CTX;
                    goto accept_error;
                }
                hssl_t ssl = hssl_new(ssl_ctx, connfd);
                if (ssl == NULL) {
                    io->error = ERR_NEW_SSL;
                    goto accept_error;
                }
                connio->ssl = ssl;
            }
            hio_enable_ssl(connio);
            ssl_server_handshake(connio);
        }
        else {
            // NOTE: SSL call accept_cb after handshake finished
            __accept_cb(connio);
        }
    }
    return;
accept_error:
    hloge("listenfd=%d accept error: %s:%d", io->fd, socket_strerror(io->error), io->error);
    // NOTE: Don't close listen fd automatically anyway.
    // hio_close(io);
}
// Completion handler for a non-blocking connect(): verifies the
// connection via getpeername(), caches the local address, and for SSL
// connections starts the client handshake (the user connect_cb then
// fires only after the handshake finishes).
static void nio_connect(hio_t* io) {
    // printd("nio_connect connfd=%d\n", io->fd);
    socklen_t addrlen = sizeof(sockaddr_u);
    // getpeername() failing here means the async connect actually failed.
    int ret = getpeername(io->fd, io->peeraddr, &addrlen);
    if (ret < 0) {
        io->error = socket_errno();
        goto connect_error;
    }
    else {
        addrlen = sizeof(sockaddr_u);
        getsockname(io->fd, io->localaddr, &addrlen);
        if (io->io_type == HIO_TYPE_SSL) {
            if (io->ssl == NULL) {
                // SSL context priority: io->ssl_ctx > g_ssl_ctx > hssl_ctx_new
                hssl_ctx_t ssl_ctx = NULL;
                if (io->ssl_ctx) {
                    ssl_ctx = io->ssl_ctx;
                } else if (g_ssl_ctx) {
                    ssl_ctx = g_ssl_ctx;
                } else {
                    // Lazily create a default context; flag it so
                    // hio_close knows this module owns (and frees) it.
                    io->ssl_ctx = ssl_ctx = hssl_ctx_new(NULL);
                    io->alloced_ssl_ctx = 1;
                }
                if (ssl_ctx == NULL) {
                    io->error = ERR_NEW_SSL_CTX;
                    goto connect_error;
                }
                hssl_t ssl = hssl_new(ssl_ctx, io->fd);
                if (ssl == NULL) {
                    io->error = ERR_NEW_SSL;
                    goto connect_error;
                }
                io->ssl = ssl;
            }
            if (io->hostname) {
                // SNI: lets the server select the matching certificate.
                hssl_set_sni_hostname(io->ssl, io->hostname);
            }
            ssl_client_handshake(io);
        }
        else {
            // NOTE: SSL call connect_cb after handshake finished
            __connect_cb(io);
        }
        return;
    }
connect_error:
    hlogw("connfd=%d connect error: %s:%d", io->fd, socket_strerror(io->error), io->error);
    hio_close(io);
}
  213. static void nio_connect_event_cb(hevent_t* ev) {
  214. hio_t* io = (hio_t*)ev->userdata;
  215. uint32_t id = (uintptr_t)ev->privdata;
  216. if (io->id != id) return;
  217. nio_connect(io);
  218. }
  219. static int nio_connect_async(hio_t* io) {
  220. hevent_t ev;
  221. memset(&ev, 0, sizeof(ev));
  222. ev.cb = nio_connect_event_cb;
  223. ev.userdata = io;
  224. ev.privdata = (void*)(uintptr_t)io->id;
  225. hloop_post_event(io->loop, &ev);
  226. return 0;
  227. }
  228. static int __nio_read(hio_t* io, void* buf, int len) {
  229. int nread = 0;
  230. switch (io->io_type) {
  231. case HIO_TYPE_SSL:
  232. nread = hssl_read(io->ssl, buf, len);
  233. break;
  234. case HIO_TYPE_TCP:
  235. nread = recv(io->fd, buf, len, 0);
  236. break;
  237. case HIO_TYPE_UDP:
  238. case HIO_TYPE_KCP:
  239. case HIO_TYPE_IP:
  240. {
  241. socklen_t addrlen = sizeof(sockaddr_u);
  242. nread = recvfrom(io->fd, buf, len, 0, io->peeraddr, &addrlen);
  243. }
  244. break;
  245. default:
  246. nread = read(io->fd, buf, len);
  247. break;
  248. }
  249. // hlogd("read retval=%d", nread);
  250. return nread;
  251. }
  252. static int __nio_write(hio_t* io, const void* buf, int len, struct sockaddr* addr) {
  253. int nwrite = 0;
  254. switch (io->io_type) {
  255. case HIO_TYPE_SSL:
  256. nwrite = hssl_write(io->ssl, buf, len);
  257. break;
  258. case HIO_TYPE_TCP:
  259. {
  260. int flag = 0;
  261. #ifdef MSG_NOSIGNAL
  262. flag |= MSG_NOSIGNAL;
  263. #endif
  264. nwrite = send(io->fd, buf, len, flag);
  265. }
  266. break;
  267. case HIO_TYPE_UDP:
  268. case HIO_TYPE_KCP:
  269. case HIO_TYPE_IP:
  270. {
  271. if (addr == NULL) addr = io->peeraddr;
  272. nwrite = sendto(io->fd, buf, len, 0, addr, SOCKADDR_LEN(addr));
  273. if (((sockaddr_u*)io->localaddr)->sin.sin_port == 0) {
  274. socklen_t addrlen = sizeof(sockaddr_u);
  275. getsockname(io->fd, io->localaddr, &addrlen);
  276. }
  277. }
  278. break;
  279. default:
  280. nwrite = write(io->fd, buf, len);
  281. break;
  282. }
  283. // hlogd("write retval=%d", nwrite);
  284. return nwrite;
  285. }
// Readability handler: read into readbuf at the current tail and hand
// the bytes to __read_cb. SSL reads loop because the TLS layer may have
// buffered more data than one readiness event indicates.
static void nio_read(hio_t* io) {
    // printd("nio_read fd=%d\n", io->fd);
    void* buf;
    int len = 0, nread = 0, err = 0;
read:
    buf = io->readbuf.base + io->readbuf.tail;
    if (io->read_flags & HIO_READ_UNTIL_LENGTH) {
        // Fixed-length read: only request the bytes still missing.
        len = io->read_until_length - (io->readbuf.tail - io->readbuf.head);
    } else {
        len = io->readbuf.len - io->readbuf.tail;
    }
    assert(len > 0);
    nread = __nio_read(io, buf, len);
    // printd("read retval=%d\n", nread);
    if (nread < 0) {
        err = socket_errno();
        if (err == EAGAIN || err == EINTR) {
            // Nothing available right now; wait for the next event.
            // goto read_done;
            return;
        } else if (err == EMSGSIZE) {
            // Datagram larger than the buffer: treat as a full read of len.
            nread = len;
        } else {
            // perror("read");
            io->error = err;
            goto read_error;
        }
    }
    if (nread == 0 && (io->io_type & HIO_TYPE_SOCK_STREAM)) {
        // Orderly shutdown by the peer on a stream socket.
        goto disconnect;
    }
    if (nread < len) {
        // NOTE: make string friendly (in-bounds: nread < len here)
        ((char*)buf)[nread] = '\0';
    }
    io->readbuf.tail += nread;
    __read_cb(io, buf, nread);
    if (nread == len && !io->closed) {
        // NOTE: ssl may have own cache
        if (io->io_type == HIO_TYPE_SSL) {
            // read continue
            goto read;
        }
    }
    return;
read_error:
disconnect:
    if (io->io_type & HIO_TYPE_SOCK_STREAM) {
        hio_close(io);
    }
}
// Writability handler: flush queued write buffers in FIFO order.
// write_mutex is held whenever the queue is inspected or modified and
// is released before any return or hio_close call.
static void nio_write(hio_t* io) {
    // printd("nio_write fd=%d\n", io->fd);
    int nwrite = 0, err = 0;
    hrecursive_mutex_lock(&io->write_mutex);
write:
    if (write_queue_empty(&io->write_queue)) {
        hrecursive_mutex_unlock(&io->write_mutex);
        if (io->close) {
            // A close was deferred until the queue drained; do it now.
            io->close = 0;
            hio_close(io);
        }
        return;
    }
    offset_buf_t* pbuf = write_queue_front(&io->write_queue);
    char* base = pbuf->base;
    char* buf = base + pbuf->offset;
    int len = pbuf->len - pbuf->offset;
    struct sockaddr* addr = NULL;
    if (io->io_type & (HIO_TYPE_SOCK_DGRAM | HIO_TYPE_SOCK_RAW)) {
        // hio_write4 stored the destination address at the head of the
        // queued buffer for datagram/raw sockets.
        addr = (struct sockaddr*)base;
    }
    nwrite = __nio_write(io, buf, len, addr);
    // printd("write retval=%d\n", nwrite);
    if (nwrite < 0) {
        err = socket_errno();
        if (err == EAGAIN || err == EINTR) {
            // Kernel buffer full; resume on the next writable event.
            hrecursive_mutex_unlock(&io->write_mutex);
            return;
        } else {
            // perror("write");
            io->error = err;
            goto write_error;
        }
    }
    if (nwrite == 0 && (io->io_type & HIO_TYPE_SOCK_STREAM)) {
        goto disconnect;
    }
    pbuf->offset += nwrite;
    io->write_bufsize -= nwrite;
    __write_cb(io, buf, nwrite);
    if (nwrite == len) {
        // NOTE: after write_cb, pbuf maybe invalid.
        // HV_FREE(pbuf->base);
        HV_FREE(base);
        write_queue_pop_front(&io->write_queue);
        if (!io->closed) {
            // write continue
            goto write;
        }
    }
    hrecursive_mutex_unlock(&io->write_mutex);
    return;
write_error:
disconnect:
    hrecursive_mutex_unlock(&io->write_mutex);
    if (io->io_type & HIO_TYPE_SOCK_STREAM) {
        hio_close(io);
    }
}
// Central event dispatcher registered by hio_accept/hio_connect/
// hio_read/hio_write4: demultiplexes revents into the accept/read and
// connect/write paths, then clears revents for the next poll cycle.
static void hio_handle_events(hio_t* io) {
    if ((io->events & HV_READ) && (io->revents & HV_READ)) {
        if (io->accept) {
            nio_accept(io);
        }
        else {
            nio_read(io);
        }
    }
    if ((io->events & HV_WRITE) && (io->revents & HV_WRITE)) {
        // NOTE: del HV_WRITE, if write_queue empty
        hrecursive_mutex_lock(&io->write_mutex);
        if (write_queue_empty(&io->write_queue)) {
            hio_del(io, HV_WRITE);
        }
        hrecursive_mutex_unlock(&io->write_mutex);
        if (io->connect) {
            // NOTE: connect just do once
            // ONESHOT
            io->connect = 0;
            nio_connect(io);
        }
        else {
            nio_write(io);
        }
    }
    io->revents = 0;
}
  423. int hio_accept(hio_t* io) {
  424. io->accept = 1;
  425. return hio_add(io, hio_handle_events, HV_READ);
  426. }
  427. int hio_connect(hio_t* io) {
  428. int ret = connect(io->fd, io->peeraddr, SOCKADDR_LEN(io->peeraddr));
  429. #ifdef OS_WIN
  430. if (ret < 0 && socket_errno() != WSAEWOULDBLOCK) {
  431. #else
  432. if (ret < 0 && socket_errno() != EINPROGRESS) {
  433. #endif
  434. perror("connect");
  435. io->error = socket_errno();
  436. hio_close_async(io);
  437. return ret;
  438. }
  439. if (ret == 0) {
  440. // connect ok
  441. nio_connect_async(io);
  442. return 0;
  443. }
  444. int timeout = io->connect_timeout ? io->connect_timeout : HIO_DEFAULT_CONNECT_TIMEOUT;
  445. io->connect_timer = htimer_add(io->loop, __connect_timeout_cb, timeout, 1);
  446. io->connect_timer->privdata = io;
  447. io->connect = 1;
  448. return hio_add(io, hio_handle_events, HV_WRITE);
  449. }
  450. int hio_read (hio_t* io) {
  451. if (io->closed) {
  452. hloge("hio_read called but fd[%d] already closed!", io->fd);
  453. return -1;
  454. }
  455. hio_add(io, hio_handle_events, HV_READ);
  456. if (io->readbuf.tail > io->readbuf.head &&
  457. io->unpack_setting == NULL &&
  458. io->read_flags == 0) {
  459. hio_read_remain(io);
  460. }
  461. return 0;
  462. }
// Core write path shared by hio_write/hio_sendto: if the queue is empty,
// try an immediate send; any unwritten remainder is copied into a queued
// buffer (prefixed with the destination address for datagram/raw
// sockets) and HV_WRITE is enabled so nio_write flushes it later.
// Returns the number of bytes written immediately (queued bytes are not
// counted), or <0 / -1 on error. write_mutex guards the whole sequence.
static int hio_write4 (hio_t* io, const void* buf, size_t len, struct sockaddr* addr) {
    if (io->closed) {
        hloge("hio_write called but fd[%d] already closed!", io->fd);
        return -1;
    }
    int nwrite = 0, err = 0;
    hrecursive_mutex_lock(&io->write_mutex);
#if WITH_KCP
    if (io->io_type == HIO_TYPE_KCP) {
        // KCP does its own buffering/retransmission; bypass the queue.
        nwrite = hio_write_kcp(io, buf, len);
        // if (nwrite < 0) goto write_error;
        goto write_done;
    }
#endif
    if (write_queue_empty(&io->write_queue)) {
try_write:
        nwrite = __nio_write(io, buf, len, addr);
        // printd("write retval=%d\n", nwrite);
        if (nwrite < 0) {
            err = socket_errno();
            if (err == EAGAIN || err == EINTR) {
                // Kernel buffer full: treat as zero written and queue all.
                nwrite = 0;
                hlogw("try_write failed, enqueue!");
                goto enqueue;
            } else {
                // perror("write");
                io->error = err;
                goto write_error;
            }
        }
        if (nwrite == len) {
            goto write_done;
        }
        if (nwrite == 0 && (io->io_type & HIO_TYPE_SOCK_STREAM)) {
            goto disconnect;
        }
enqueue:
        hio_add(io, hio_handle_events, HV_WRITE);
    }
    if (nwrite < len) {
        size_t unwritten_len = len - nwrite;
        // Refuse to buffer beyond the configured cap.
        if (io->write_bufsize + unwritten_len > io->max_write_bufsize) {
            hloge("write bufsize > %u, close it!", io->max_write_bufsize);
            io->error = ERR_OVER_LIMIT;
            goto write_error;
        }
        size_t addrlen = 0;
        if ((io->io_type & (HIO_TYPE_SOCK_DGRAM | HIO_TYPE_SOCK_RAW)) && addr) {
            // Prepend the destination address so nio_write can recover it.
            addrlen = SOCKADDR_LEN(addr);
        }
        offset_buf_t remain;
        remain.offset = addrlen;
        remain.len = addrlen + unwritten_len;
        // NOTE: free in nio_write
        HV_ALLOC(remain.base, remain.len);
        if (addr && addrlen > 0) {
            memcpy(remain.base, addr, addrlen);
        }
        memcpy(remain.base + remain.offset, ((char*)buf) + nwrite, unwritten_len);
        if (io->write_queue.maxsize == 0) {
            // Lazily initialize the queue on first enqueue.
            write_queue_init(&io->write_queue, 4);
        }
        write_queue_push_back(&io->write_queue, &remain);
        io->write_bufsize += unwritten_len;
        if (io->write_bufsize > WRITE_BUFSIZE_HIGH_WATER) {
            hlogw("write len=%u enqueue %u, bufsize=%u over high water %u",
                (unsigned int)len,
                (unsigned int)unwritten_len,
                (unsigned int)io->write_bufsize,
                (unsigned int)WRITE_BUFSIZE_HIGH_WATER);
        }
    }
write_done:
    hrecursive_mutex_unlock(&io->write_mutex);
    if (nwrite > 0) {
        __write_cb(io, buf, nwrite);
    }
    return nwrite;
write_error:
disconnect:
    hrecursive_mutex_unlock(&io->write_mutex);
    /* NOTE:
     * We usually free resources in hclose_cb,
     * if hio_close_sync, we have to be very careful to avoid using freed resources.
     * But if hio_close_async, we do not have to worry about this.
     */
    if (io->io_type & HIO_TYPE_SOCK_STREAM) {
        hio_close_async(io);
    }
    return nwrite < 0 ? nwrite : -1;
}
  554. int hio_write (hio_t* io, const void* buf, size_t len) {
  555. return hio_write4(io, buf, len, io->peeraddr);
  556. }
  557. int hio_sendto (hio_t* io, const void* buf, size_t len, struct sockaddr* addr) {
  558. return hio_write4(io, buf, len, addr ? addr : io->peeraddr);
  559. }
// Public API: close an io. Called off the loop thread it defers to
// hio_close_async. If unsent data remains (and there is no error,
// pending close, or forced destroy) the close is postponed until the
// write queue drains, bounded by a close-timeout timer. Otherwise the
// io is torn down: timers, SSL state, hostname, and finally the fd.
// Always returns 0.
int hio_close (hio_t* io) {
    if (io->closed) return 0;
    // Only the loop thread may tear the io down synchronously.
    if (io->destroy == 0 && hv_gettid() != io->loop->tid) {
        return hio_close_async(io);
    }
    hrecursive_mutex_lock(&io->write_mutex);
    // Re-check under the lock: another path may have closed it already.
    if (io->closed) {
        hrecursive_mutex_unlock(&io->write_mutex);
        return 0;
    }
    if (!write_queue_empty(&io->write_queue) && io->error == 0 && io->close == 0 && io->destroy == 0) {
        // Defer: nio_write performs the close once the queue drains,
        // and __close_timeout_cb forces it if that takes too long.
        io->close = 1;
        hrecursive_mutex_unlock(&io->write_mutex);
        hlogw("write_queue not empty, close later.");
        int timeout_ms = io->close_timeout ? io->close_timeout : HIO_DEFAULT_CLOSE_TIMEOUT;
        io->close_timer = htimer_add(io->loop, __close_timeout_cb, timeout_ms, 1);
        io->close_timer->privdata = io;
        return 0;
    }
    io->closed = 1;
    hrecursive_mutex_unlock(&io->write_mutex);
    hio_done(io);
    __close_cb(io);
    if (io->ssl) {
        hssl_free(io->ssl);
        io->ssl = NULL;
    }
    if (io->ssl_ctx && io->alloced_ssl_ctx) {
        // Only free contexts this module allocated lazily; user-supplied
        // contexts remain the caller's responsibility.
        hssl_ctx_free(io->ssl_ctx);
        io->ssl_ctx = NULL;
    }
    SAFE_FREE(io->hostname);
    if (io->io_type & HIO_TYPE_SOCKET) {
        closesocket(io->fd);
    } else if (io->io_type == HIO_TYPE_PIPE) {
        close(io->fd);
    }
    return 0;
}
  599. #endif