// TcpServer.h
#ifndef HV_TCP_SERVER_HPP_
#define HV_TCP_SERVER_HPP_

#include "hsocket.h"
#include "hssl.h"
#include "hlog.h"

#include "EventLoopThreadPool.h"
#include "Channel.h"

namespace hv {

template<class TSocketChannel = SocketChannel>
class TcpServerTmpl {
public:
    typedef std::shared_ptr<TSocketChannel> TSocketChannelPtr;

    TcpServerTmpl() {
        listenfd = -1;
        tls = false;
        unpack_setting.mode = UNPACK_MODE_NONE;
        max_connections = 0xFFFFFFFF;
        load_balance = LB_RoundRobin;
    }
    virtual ~TcpServerTmpl() {
    }

    EventLoopPtr loop(int idx = -1) {
        return worker_threads.loop(idx);
    }

    //@retval >=0 listenfd, <0 error
    int createsocket(int port, const char* host = "0.0.0.0") {
        listenfd = Listen(port, host);
        return listenfd;
    }
    // closesocket thread-safe
    void closesocket() {
        if (listenfd >= 0) {
            hio_close_async(hio_get(acceptor_thread.hloop(), listenfd));
            listenfd = -1;
        }
    }

    void setMaxConnectionNum(uint32_t num) {
        max_connections = num;
    }
    void setLoadBalance(load_balance_e lb) {
        load_balance = lb;
    }

    // NOTE: totalThreadNum = 1 acceptor_thread + N worker_threads (N can be 0)
    void setThreadNum(int num) {
        worker_threads.setThreadNum(num);
    }
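    // Illustrative sketch (not part of the original header): setThreadNum(4)
    // gives 5 event loops in total -- 1 acceptor loop + 4 worker loops.
    // With setThreadNum(0) there are no workers, and accepted connections
    // are served on the acceptor loop itself (see onAccept below).
    //     hv::TcpServer server;
    //     server.setThreadNum(4);    // 1 acceptor + 4 workers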
    int startAccept() {
        assert(listenfd >= 0);
        hio_t* listenio = haccept(acceptor_thread.hloop(), listenfd, onAccept);
        hevent_set_userdata(listenio, this);
        if (tls) {
            hio_enable_ssl(listenio);
        }
        return 0;
    }

    void start(bool wait_threads_started = true) {
        if (worker_threads.threadNum() > 0) {
            worker_threads.start(wait_threads_started);
        }
        acceptor_thread.start(wait_threads_started, std::bind(&TcpServerTmpl::startAccept, this));
    }
    // stop thread-safe
    void stop(bool wait_threads_stopped = true) {
        acceptor_thread.stop(wait_threads_stopped);
        if (worker_threads.threadNum() > 0) {
            worker_threads.stop(wait_threads_stopped);
        }
    }

    int withTLS(hssl_ctx_opt_t* opt = NULL) {
        tls = true;
        if (opt) {
            opt->endpoint = HSSL_SERVER;
            if (hssl_ctx_init(opt) == NULL) {
                fprintf(stderr, "hssl_ctx_init failed!\n");
                return -1;
            }
        }
        return 0;
    }
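    // Usage sketch (field names assume the stock hssl_ctx_opt_t from hssl.h;
    // verify against your libhv version). Call before start():
    //     hssl_ctx_opt_t ssl_opt;
    //     memset(&ssl_opt, 0, sizeof(ssl_opt));
    //     ssl_opt.crt_file = "cert/server.crt";
    //     ssl_opt.key_file = "cert/server.key";
    //     if (server.withTLS(&ssl_opt) != 0) {
    //         // hssl_ctx_init failed
    //     }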
    void setUnpack(unpack_setting_t* setting) {
        if (setting) {
            unpack_setting = *setting;
        } else {
            unpack_setting.mode = UNPACK_MODE_NONE;
        }
    }
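    // Usage sketch (field names assume the stock unpack_setting_t from
    // hloop.h -- verify against your libhv version). With an unpack mode set,
    // onMessage fires once per complete packet rather than once per read:
    //     unpack_setting_t unpack;
    //     memset(&unpack, 0, sizeof(unpack));
    //     unpack.mode = UNPACK_BY_DELIMITER;
    //     unpack.delimiter[0] = '\r';
    //     unpack.delimiter[1] = '\n';
    //     unpack.delimiter_bytes = 2;
    //     server.setUnpack(&unpack);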
    // channel
    const TSocketChannelPtr& addChannel(hio_t* io) {
        uint32_t id = hio_id(io);
        auto channel = TSocketChannelPtr(new TSocketChannel(io));
        std::lock_guard<std::mutex> locker(mutex_);
        channels[id] = channel;
        return channels[id];
    }

    TSocketChannelPtr getChannelById(uint32_t id) {
        std::lock_guard<std::mutex> locker(mutex_);
        auto iter = channels.find(id);
        return iter != channels.end() ? iter->second : NULL;
    }

    void removeChannel(const TSocketChannelPtr& channel) {
        uint32_t id = channel->id();
        std::lock_guard<std::mutex> locker(mutex_);
        channels.erase(id);
    }

    size_t connectionNum() {
        std::lock_guard<std::mutex> locker(mutex_);
        return channels.size();
    }

    int foreachChannel(std::function<void(const TSocketChannelPtr& channel)> fn) {
        std::lock_guard<std::mutex> locker(mutex_);
        for (auto& pair : channels) {
            fn(pair.second);
        }
        return channels.size();
    }

    // broadcast thread-safe
    int broadcast(const void* data, int size) {
        return foreachChannel([data, size](const TSocketChannelPtr& channel) {
            channel->write(data, size);
        });
    }
    int broadcast(const std::string& str) {
        return broadcast(str.data(), str.size());
    }
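    // Usage note (illustrative): broadcast returns the number of channels
    // visited via foreachChannel, not the number of successful writes. It
    // holds the channels mutex while iterating, so keep custom
    // foreachChannel callbacks short:
    //     int nchannels = server.broadcast("ping");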
private:
    static void newConnEvent(hio_t* connio) {
        TcpServerTmpl* server = (TcpServerTmpl*)hevent_userdata(connio);
        if (server->connectionNum() >= server->max_connections) {
            hlogw("over max_connections");
            hio_close(connio);
            return;
        }

        // NOTE: attach to worker loop
        EventLoop* worker_loop = currentThreadEventLoop;
        assert(worker_loop != NULL);
        hio_attach(worker_loop->loop(), connio);

        const TSocketChannelPtr& channel = server->addChannel(connio);
        channel->status = SocketChannel::CONNECTED;

        channel->onread = [server, &channel](Buffer* buf) {
            if (server->onMessage) {
                server->onMessage(channel, buf);
            }
        };
        channel->onwrite = [server, &channel](Buffer* buf) {
            if (server->onWriteComplete) {
                server->onWriteComplete(channel, buf);
            }
        };
        channel->onclose = [server, &channel]() {
            EventLoop* worker_loop = currentThreadEventLoop;
            assert(worker_loop != NULL);
            --worker_loop->connectionNum;
            channel->status = SocketChannel::CLOSED;
            if (server->onConnection) {
                server->onConnection(channel);
            }
            server->removeChannel(channel);
            // NOTE: After removeChannel, channel may be destroyed,
            // so in this lambda function, no code should be added below.
        };

        if (server->unpack_setting.mode != UNPACK_MODE_NONE) {
            channel->setUnpack(&server->unpack_setting);
        }
        channel->startRead();
        if (server->onConnection) {
            server->onConnection(channel);
        }
    }

    static void onAccept(hio_t* connio) {
        TcpServerTmpl* server = (TcpServerTmpl*)hevent_userdata(connio);
        // NOTE: detach from acceptor loop
        hio_detach(connio);

        EventLoopPtr worker_loop = server->worker_threads.nextLoop(server->load_balance);
        if (worker_loop == NULL) {
            worker_loop = server->acceptor_thread.loop();
        }
        ++worker_loop->connectionNum;
        worker_loop->runInLoop(std::bind(&TcpServerTmpl::newConnEvent, connio));
    }
public:
    int listenfd;
    bool tls;
    unpack_setting_t unpack_setting;

    // Callback
    std::function<void(const TSocketChannelPtr&)> onConnection;
    std::function<void(const TSocketChannelPtr&, Buffer*)> onMessage;
    // NOTE: Use Channel::isWriteComplete in the onWriteComplete callback to determine whether all data has been written.
    std::function<void(const TSocketChannelPtr&, Buffer*)> onWriteComplete;

    uint32_t max_connections;
    load_balance_e load_balance;

private:
    // id => TSocketChannelPtr
    std::map<uint32_t, TSocketChannelPtr> channels; // GUARDED_BY(mutex_)
    std::mutex mutex_;
    EventLoopThread acceptor_thread;
    EventLoopThreadPool worker_threads;
};
typedef TcpServerTmpl<SocketChannel> TcpServer;

} // namespace hv
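/* Usage sketch (illustrative, not part of the original header; mirrors the
 * style of libhv's TCP echo example, with error handling trimmed):
 *
 *     #include "TcpServer.h"
 *     using namespace hv;
 *
 *     int main() {
 *         TcpServer server;
 *         if (server.createsocket(1234) < 0) return -1;
 *         server.onConnection = [](const SocketChannelPtr& channel) {
 *             printf("%s %s\n", channel->peeraddr().c_str(),
 *                    channel->isConnected() ? "connected" : "disconnected");
 *         };
 *         server.onMessage = [](const SocketChannelPtr& channel, Buffer* buf) {
 *             channel->write(buf); // echo back
 *         };
 *         server.setThreadNum(4);
 *         server.start();
 *         while (1) hv_sleep(1); // block the main thread
 *         return 0;
 *     }
 */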
#endif // HV_TCP_SERVER_HPP_