#ifndef HV_TCP_SERVER_HPP_
#define HV_TCP_SERVER_HPP_

#include "hsocket.h"
#include "hssl.h"
#include "hlog.h"

#include "EventLoopThreadPool.h"
#include "Channel.h"

namespace hv {

template<class TSocketChannel = SocketChannel>
class TcpServerEventLoopTmpl {
public:
    typedef std::shared_ptr<TSocketChannel> TSocketChannelPtr;

    TcpServerEventLoopTmpl(EventLoopPtr loop = NULL) {
        acceptor_loop = loop ? loop : std::make_shared<EventLoop>();
        listenfd = -1;
        tls = false;
        unpack_setting.mode = UNPACK_MODE_NONE;
        max_connections = 0xFFFFFFFF;
        load_balance = LB_RoundRobin;
    }

    virtual ~TcpServerEventLoopTmpl() {
    }

    EventLoopPtr loop(int idx = -1) {
        return worker_threads.loop(idx);
    }

    //@retval >=0 listenfd, <0 error
    int createsocket(int port, const char* host = "0.0.0.0") {
        listenfd = Listen(port, host);
        return listenfd;
    }
    // closesocket thread-safe
    void closesocket() {
        if (listenfd >= 0) {
            hloop_t* loop = acceptor_loop->loop();
            if (loop) {
                hio_t* listenio = hio_get(loop, listenfd);
                assert(listenio != NULL);
                hio_close_async(listenio);
            }
            listenfd = -1;
        }
    }

    void setMaxConnectionNum(uint32_t num) {
        max_connections = num;
    }
    void setLoadBalance(load_balance_e lb) {
        load_balance = lb;
    }
    // NOTE: totalThreadNum = 1 acceptor_thread + N worker_threads (N can be 0)
    void setThreadNum(int num) {
        worker_threads.setThreadNum(num);
    }

    int startAccept() {
        if (listenfd < 0) return -1;
        hloop_t* loop = acceptor_loop->loop();
        if (loop == NULL) return -2;
        hio_t* listenio = haccept(loop, listenfd, onAccept);
        assert(listenio != NULL);
        hevent_set_userdata(listenio, this);
        if (tls) {
            hio_enable_ssl(listenio);
        }
        return 0;
    }

    int stopAccept() {
        if (listenfd < 0) return -1;
        hloop_t* loop = acceptor_loop->loop();
        if (loop == NULL) return -2;
        hio_t* listenio = hio_get(loop, listenfd);
        assert(listenio != NULL);
        return hio_del(listenio, HV_READ);
    }

    // start thread-safe
    void start(bool wait_threads_started = true) {
        if (worker_threads.threadNum() > 0) {
            worker_threads.start(wait_threads_started);
        }
        acceptor_loop->runInLoop(std::bind(&TcpServerEventLoopTmpl::startAccept, this));
    }
    // stop thread-safe
    void stop(bool wait_threads_stopped = true) {
        closesocket();
        if (worker_threads.threadNum() > 0) {
            worker_threads.stop(wait_threads_stopped);
        }
    }

    int withTLS(hssl_ctx_opt_t* opt = NULL) {
        tls = true;
        if (opt) {
            opt->endpoint = HSSL_SERVER;
            if (hssl_ctx_init(opt) == NULL) {
                fprintf(stderr, "hssl_ctx_init failed!\n");
                return -1;
            }
        }
        return 0;
    }
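    // Example (sketch, not part of this file): enabling TLS before start().
    // The certificate paths are placeholders; crt_file/key_file are the
    // hssl_ctx_opt_t fields declared in hssl.h.
    //
    //     hssl_ctx_opt_t ssl_opt;
    //     memset(&ssl_opt, 0, sizeof(ssl_opt));
    //     ssl_opt.crt_file = "cert/server.crt";
    //     ssl_opt.key_file = "cert/server.key";
    //     server.withTLS(&ssl_opt);
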
    void setUnpack(unpack_setting_t* setting) {
        if (setting) {
            unpack_setting = *setting;
        } else {
            unpack_setting.mode = UNPACK_MODE_NONE;
        }
    }
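    // Example (sketch): splitting the stream by a 4-byte big-endian length
    // field placed before the body, using the unpack_setting_t fields from
    // libhv's unpack settings (field names assumed from the library header):
    //
    //     unpack_setting_t setting;
    //     memset(&setting, 0, sizeof(setting));
    //     setting.mode = UNPACK_BY_LENGTH_FIELD;
    //     setting.package_max_length = DEFAULT_PACKAGE_MAX_LENGTH;
    //     setting.body_offset = 4;
    //     setting.length_field_offset = 0;
    //     setting.length_field_bytes = 4;
    //     setting.length_field_coding = ENCODE_BY_BIG_ENDIAN;
    //     server.setUnpack(&setting);
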
    // channel
    const TSocketChannelPtr& addChannel(hio_t* io) {
        uint32_t id = hio_id(io);
        auto channel = TSocketChannelPtr(new TSocketChannel(io));
        std::lock_guard<std::mutex> locker(mutex_);
        channels[id] = channel;
        return channels[id];
    }

    TSocketChannelPtr getChannelById(uint32_t id) {
        std::lock_guard<std::mutex> locker(mutex_);
        auto iter = channels.find(id);
        return iter != channels.end() ? iter->second : NULL;
    }

    void removeChannel(const TSocketChannelPtr& channel) {
        uint32_t id = channel->id();
        std::lock_guard<std::mutex> locker(mutex_);
        channels.erase(id);
    }

    size_t connectionNum() {
        std::lock_guard<std::mutex> locker(mutex_);
        return channels.size();
    }
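    // NOTE: fn is invoked for every channel while mutex_ is held,
    // so keep it short and non-blocking.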
    int foreachChannel(std::function<void(const TSocketChannelPtr& channel)> fn) {
        std::lock_guard<std::mutex> locker(mutex_);
        for (auto& pair : channels) {
            fn(pair.second);
        }
        return channels.size();
    }

    // broadcast thread-safe
    int broadcast(const void* data, int size) {
        return foreachChannel([data, size](const TSocketChannelPtr& channel) {
            channel->write(data, size);
        });
    }
    int broadcast(const std::string& str) {
        return broadcast(str.data(), str.size());
    }
private:
    static void newConnEvent(hio_t* connio) {
        TcpServerEventLoopTmpl* server = (TcpServerEventLoopTmpl*)hevent_userdata(connio);
        if (server->connectionNum() >= server->max_connections) {
            hlogw("over max_connections");
            hio_close(connio);
            return;
        }

        // NOTE: attach to worker loop
        EventLoop* worker_loop = currentThreadEventLoop;
        assert(worker_loop != NULL);
        hio_attach(worker_loop->loop(), connio);

        const TSocketChannelPtr& channel = server->addChannel(connio);
        channel->status = SocketChannel::CONNECTED;
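        // NOTE: the lambdas below capture `channel` by reference: it aliases
        // the map slot inside channels (std::map references stay valid until
        // erase), and capturing the shared_ptr by value would create a
        // reference cycle (channel -> lambda -> channel) that keeps the
        // channel alive.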
        channel->onread = [server, &channel](Buffer* buf) {
            if (server->onMessage) {
                server->onMessage(channel, buf);
            }
        };
        channel->onwrite = [server, &channel](Buffer* buf) {
            if (server->onWriteComplete) {
                server->onWriteComplete(channel, buf);
            }
        };
        channel->onclose = [server, &channel]() {
            EventLoop* worker_loop = currentThreadEventLoop;
            assert(worker_loop != NULL);
            --worker_loop->connectionNum;
            channel->status = SocketChannel::CLOSED;
            if (server->onConnection) {
                server->onConnection(channel);
            }
            server->removeChannel(channel);
            // NOTE: After removeChannel, channel may be destroyed,
            // so no code should be added below in this lambda.
        };

        if (server->unpack_setting.mode != UNPACK_MODE_NONE) {
            channel->setUnpack(&server->unpack_setting);
        }
        channel->startRead();
        if (server->onConnection) {
            server->onConnection(channel);
        }
    }
    static void onAccept(hio_t* connio) {
        TcpServerEventLoopTmpl* server = (TcpServerEventLoopTmpl*)hevent_userdata(connio);
        // NOTE: detach from acceptor loop
        hio_detach(connio);

        EventLoopPtr worker_loop = server->worker_threads.nextLoop(server->load_balance);
        if (worker_loop == NULL) {
            worker_loop = server->acceptor_loop;
        }
        ++worker_loop->connectionNum;
        worker_loop->runInLoop(std::bind(&TcpServerEventLoopTmpl::newConnEvent, connio));
    }
public:
    int listenfd;
    bool tls;
    unpack_setting_t unpack_setting;

    // Callback
    std::function<void(const TSocketChannelPtr&)> onConnection;
    std::function<void(const TSocketChannelPtr&, Buffer*)> onMessage;
    // NOTE: Use Channel::isWriteComplete in the onWriteComplete callback to determine whether all data has been written.
    std::function<void(const TSocketChannelPtr&, Buffer*)> onWriteComplete;

    uint32_t max_connections;
    load_balance_e load_balance;

private:
    // id => TSocketChannelPtr
    std::map<uint32_t, TSocketChannelPtr> channels;  // GUARDED_BY(mutex_)
    std::mutex mutex_;

    EventLoopPtr acceptor_loop;
    EventLoopThreadPool worker_threads;
};
template<class TSocketChannel = SocketChannel>
class TcpServerTmpl : private EventLoopThread, public TcpServerEventLoopTmpl<TSocketChannel> {
public:
    TcpServerTmpl(EventLoopPtr loop = NULL)
        : EventLoopThread(loop)
        , TcpServerEventLoopTmpl<TSocketChannel>(EventLoopThread::loop())
    {}
    virtual ~TcpServerTmpl() {
        stop(true);
    }

    // Return by value: the base-class loop() returns a temporary,
    // so returning a const reference here would dangle.
    EventLoopPtr loop(int idx = -1) {
        return TcpServerEventLoopTmpl<TSocketChannel>::loop(idx);
    }

    // start thread-safe
    void start(bool wait_threads_started = true) {
        TcpServerEventLoopTmpl<TSocketChannel>::start(wait_threads_started);
        EventLoopThread::start(wait_threads_started);
    }

    // stop thread-safe
    void stop(bool wait_threads_stopped = true) {
        EventLoopThread::stop(wait_threads_stopped);
        TcpServerEventLoopTmpl<TSocketChannel>::stop(wait_threads_stopped);
    }
};
typedef TcpServerTmpl<SocketChannel> TcpServer;
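/* Example usage (sketch): a minimal echo server, modeled on the examples that
 * ship with libhv; the port number is arbitrary.
 *
 *   #include "TcpServer.h"
 *   using namespace hv;
 *
 *   int main() {
 *       TcpServer server;
 *       int listenfd = server.createsocket(1234);
 *       if (listenfd < 0) return -1;
 *       server.onConnection = [](const SocketChannelPtr& channel) {
 *           printf("%s %s\n", channel->peeraddr().c_str(),
 *                  channel->isConnected() ? "connected" : "disconnected");
 *       };
 *       server.onMessage = [](const SocketChannelPtr& channel, Buffer* buf) {
 *           channel->write(buf);    // echo back
 *       };
 *       server.setThreadNum(4);
 *       server.start();
 *       while (getchar() != '\n');  // press Enter to exit
 *       return 0;
 *   }
 */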
}  // end namespace hv

#endif // HV_TCP_SERVER_HPP_