// TcpServer.h
  1. #ifndef HV_TCP_SERVER_HPP_
  2. #define HV_TCP_SERVER_HPP_
  3. #include "hsocket.h"
  4. #include "hssl.h"
  5. #include "hlog.h"
  6. #include "EventLoopThreadPool.h"
  7. #include "Channel.h"
  8. namespace hv {
  9. template<class TSocketChannel = SocketChannel>
  10. class TcpServerEventLoopTmpl {
  11. public:
  12. typedef std::shared_ptr<TSocketChannel> TSocketChannelPtr;
  13. TcpServerEventLoopTmpl(EventLoopPtr loop = NULL) {
  14. acceptor_loop = loop ? loop : std::make_shared<EventLoop>();
  15. listenfd = -1;
  16. tls = false;
  17. unpack_setting.mode = UNPACK_MODE_NONE;
  18. max_connections = 0xFFFFFFFF;
  19. load_balance = LB_RoundRobin;
  20. }
  21. virtual ~TcpServerEventLoopTmpl() {
  22. }
  23. EventLoopPtr loop(int idx = -1) {
  24. return worker_threads.loop(idx);
  25. }
  26. //@retval >=0 listenfd, <0 error
  27. int createsocket(int port, const char* host = "0.0.0.0") {
  28. listenfd = Listen(port, host);
  29. return listenfd;
  30. }
  31. // closesocket thread-safe
  32. void closesocket() {
  33. if (listenfd >= 0) {
  34. hio_close_async(hio_get(acceptor_loop->loop(), listenfd));
  35. listenfd = -1;
  36. }
  37. }
  38. void setMaxConnectionNum(uint32_t num) {
  39. max_connections = num;
  40. }
  41. void setLoadBalance(load_balance_e lb) {
  42. load_balance = lb;
  43. }
  44. // NOTE: totalThreadNum = 1 acceptor_thread + N worker_threads (N can be 0)
  45. void setThreadNum(int num) {
  46. worker_threads.setThreadNum(num);
  47. }
  48. int startAccept() {
  49. assert(listenfd >= 0);
  50. hio_t* listenio = haccept(acceptor_loop->loop(), listenfd, onAccept);
  51. hevent_set_userdata(listenio, this);
  52. if (tls) {
  53. hio_enable_ssl(listenio);
  54. }
  55. return 0;
  56. }
  57. // start thread-safe
  58. void start(bool wait_threads_started = true) {
  59. if (worker_threads.threadNum() > 0) {
  60. worker_threads.start(wait_threads_started);
  61. }
  62. acceptor_loop->runInLoop(std::bind(&TcpServerEventLoopTmpl::startAccept, this));
  63. }
  64. // stop thread-safe
  65. void stop(bool wait_threads_stopped = true) {
  66. if (worker_threads.threadNum() > 0) {
  67. worker_threads.stop(wait_threads_stopped);
  68. }
  69. }
  70. int withTLS(hssl_ctx_opt_t* opt = NULL) {
  71. tls = true;
  72. if (opt) {
  73. opt->endpoint = HSSL_SERVER;
  74. if (hssl_ctx_init(opt) == NULL) {
  75. fprintf(stderr, "hssl_ctx_init failed!\n");
  76. return -1;
  77. }
  78. }
  79. return 0;
  80. }
  81. void setUnpack(unpack_setting_t* setting) {
  82. if (setting) {
  83. unpack_setting = *setting;
  84. } else {
  85. unpack_setting.mode = UNPACK_MODE_NONE;
  86. }
  87. }
  88. // channel
  89. const TSocketChannelPtr& addChannel(hio_t* io) {
  90. uint32_t id = hio_id(io);
  91. auto channel = TSocketChannelPtr(new TSocketChannel(io));
  92. std::lock_guard<std::mutex> locker(mutex_);
  93. channels[id] = channel;
  94. return channels[id];
  95. }
  96. TSocketChannelPtr getChannelById(uint32_t id) {
  97. std::lock_guard<std::mutex> locker(mutex_);
  98. auto iter = channels.find(id);
  99. return iter != channels.end() ? iter->second : NULL;
  100. }
  101. void removeChannel(const TSocketChannelPtr& channel) {
  102. uint32_t id = channel->id();
  103. std::lock_guard<std::mutex> locker(mutex_);
  104. channels.erase(id);
  105. }
  106. size_t connectionNum() {
  107. std::lock_guard<std::mutex> locker(mutex_);
  108. return channels.size();
  109. }
  110. int foreachChannel(std::function<void(const TSocketChannelPtr& channel)> fn) {
  111. std::lock_guard<std::mutex> locker(mutex_);
  112. for (auto& pair : channels) {
  113. fn(pair.second);
  114. }
  115. return channels.size();
  116. }
  117. // broadcast thread-safe
  118. int broadcast(const void* data, int size) {
  119. return foreachChannel([data, size](const TSocketChannelPtr& channel) {
  120. channel->write(data, size);
  121. });
  122. }
  123. int broadcast(const std::string& str) {
  124. return broadcast(str.data(), str.size());
  125. }
  126. private:
  127. static void newConnEvent(hio_t* connio) {
  128. TcpServerEventLoopTmpl* server = (TcpServerEventLoopTmpl*)hevent_userdata(connio);
  129. if (server->connectionNum() >= server->max_connections) {
  130. hlogw("over max_connections");
  131. hio_close(connio);
  132. return;
  133. }
  134. // NOTE: attach to worker loop
  135. EventLoop* worker_loop = currentThreadEventLoop;
  136. assert(worker_loop != NULL);
  137. hio_attach(worker_loop->loop(), connio);
  138. const TSocketChannelPtr& channel = server->addChannel(connio);
  139. channel->status = SocketChannel::CONNECTED;
  140. channel->onread = [server, &channel](Buffer* buf) {
  141. if (server->onMessage) {
  142. server->onMessage(channel, buf);
  143. }
  144. };
  145. channel->onwrite = [server, &channel](Buffer* buf) {
  146. if (server->onWriteComplete) {
  147. server->onWriteComplete(channel, buf);
  148. }
  149. };
  150. channel->onclose = [server, &channel]() {
  151. EventLoop* worker_loop = currentThreadEventLoop;
  152. assert(worker_loop != NULL);
  153. --worker_loop->connectionNum;
  154. channel->status = SocketChannel::CLOSED;
  155. if (server->onConnection) {
  156. server->onConnection(channel);
  157. }
  158. server->removeChannel(channel);
  159. // NOTE: After removeChannel, channel may be destroyed,
  160. // so in this lambda function, no code should be added below.
  161. };
  162. if (server->unpack_setting.mode != UNPACK_MODE_NONE) {
  163. channel->setUnpack(&server->unpack_setting);
  164. }
  165. channel->startRead();
  166. if (server->onConnection) {
  167. server->onConnection(channel);
  168. }
  169. }
  170. static void onAccept(hio_t* connio) {
  171. TcpServerEventLoopTmpl* server = (TcpServerEventLoopTmpl*)hevent_userdata(connio);
  172. // NOTE: detach from acceptor loop
  173. hio_detach(connio);
  174. EventLoopPtr worker_loop = server->worker_threads.nextLoop(server->load_balance);
  175. if (worker_loop == NULL) {
  176. worker_loop = server->acceptor_loop;
  177. }
  178. ++worker_loop->connectionNum;
  179. worker_loop->runInLoop(std::bind(&TcpServerEventLoopTmpl::newConnEvent, connio));
  180. }
  181. public:
  182. int listenfd;
  183. bool tls;
  184. unpack_setting_t unpack_setting;
  185. // Callback
  186. std::function<void(const TSocketChannelPtr&)> onConnection;
  187. std::function<void(const TSocketChannelPtr&, Buffer*)> onMessage;
  188. // NOTE: Use Channel::isWriteComplete in onWriteComplete callback to determine whether all data has been written.
  189. std::function<void(const TSocketChannelPtr&, Buffer*)> onWriteComplete;
  190. uint32_t max_connections;
  191. load_balance_e load_balance;
  192. private:
  193. // id => TSocketChannelPtr
  194. std::map<uint32_t, TSocketChannelPtr> channels; // GUAREDE_BY(mutex_)
  195. std::mutex mutex_;
  196. EventLoopPtr acceptor_loop;
  197. EventLoopThreadPool worker_threads;
  198. };
  199. template<class TSocketChannel = SocketChannel>
  200. class TcpServerTmpl : private EventLoopThread, public TcpServerEventLoopTmpl<TSocketChannel> {
  201. public:
  202. TcpServerTmpl(EventLoopPtr loop = NULL)
  203. : EventLoopThread()
  204. , TcpServerEventLoopTmpl<TSocketChannel>(EventLoopThread::loop())
  205. {}
  206. virtual ~TcpServerTmpl() {
  207. stop(true);
  208. }
  209. const EventLoopPtr& loop(int idx = -1) {
  210. return TcpServerEventLoopTmpl<TSocketChannel>::loop(idx);
  211. }
  212. // start thread-safe
  213. void start(bool wait_threads_started = true) {
  214. TcpServerEventLoopTmpl<TSocketChannel>::start(wait_threads_started);
  215. EventLoopThread::start(wait_threads_started);
  216. }
  217. // stop thread-safe
  218. void stop(bool wait_threads_stopped = true) {
  219. EventLoopThread::stop(wait_threads_stopped);
  220. TcpServerEventLoopTmpl<TSocketChannel>::stop(wait_threads_stopped);
  221. }
  222. };
  223. typedef TcpServerTmpl<SocketChannel> TcpServer;
  224. }
  225. #endif // HV_TCP_SERVER_HPP_