// TcpServer.h
#ifndef HV_TCP_SERVER_HPP_
#define HV_TCP_SERVER_HPP_

#include <cassert>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>

#include "hsocket.h"
#include "hssl.h"
#include "hlog.h"
#include "EventLoopThreadPool.h"
#include "Channel.h"
namespace hv {
  9. template<class TSocketChannel = SocketChannel>
  10. class TcpServerEventLoopTmpl {
  11. public:
  12. typedef std::shared_ptr<TSocketChannel> TSocketChannelPtr;
  13. TcpServerEventLoopTmpl(EventLoopPtr loop = NULL) {
  14. acceptor_loop = loop ? loop : std::make_shared<EventLoop>();
  15. port = 0;
  16. listenfd = -1;
  17. tls = false;
  18. tls_setting = NULL;
  19. unpack_setting = NULL;
  20. max_connections = 0xFFFFFFFF;
  21. load_balance = LB_RoundRobin;
  22. }
  23. virtual ~TcpServerEventLoopTmpl() {
  24. HV_FREE(tls_setting);
  25. HV_FREE(unpack_setting);
  26. }
  27. EventLoopPtr loop(int idx = -1) {
  28. return worker_threads.loop(idx);
  29. }
  30. //@retval >=0 listenfd, <0 error
  31. int createsocket(int port, const char* host = "0.0.0.0") {
  32. listenfd = Listen(port, host);
  33. if (listenfd < 0) return listenfd;
  34. this->host = host;
  35. this->port = port;
  36. return listenfd;
  37. }
  38. // closesocket thread-safe
  39. void closesocket() {
  40. if (listenfd >= 0) {
  41. hloop_t* loop = acceptor_loop->loop();
  42. if (loop) {
  43. hio_t* listenio = hio_get(loop, listenfd);
  44. assert(listenio != NULL);
  45. hio_close_async(listenio);
  46. }
  47. listenfd = -1;
  48. }
  49. }
  50. void setMaxConnectionNum(uint32_t num) {
  51. max_connections = num;
  52. }
  53. void setLoadBalance(load_balance_e lb) {
  54. load_balance = lb;
  55. }
  56. // NOTE: totalThreadNum = 1 acceptor_thread + N worker_threads (N can be 0)
  57. void setThreadNum(int num) {
  58. worker_threads.setThreadNum(num);
  59. }
  60. int startAccept() {
  61. if (listenfd < 0) {
  62. listenfd = createsocket(port, host.c_str());
  63. if (listenfd < 0) {
  64. hloge("createsocket %s:%d return %d!\n", host.c_str(), port, listenfd);
  65. return listenfd;
  66. }
  67. }
  68. hloop_t* loop = acceptor_loop->loop();
  69. if (loop == NULL) return -2;
  70. hio_t* listenio = haccept(loop, listenfd, onAccept);
  71. assert(listenio != NULL);
  72. hevent_set_userdata(listenio, this);
  73. if (tls) {
  74. hio_enable_ssl(listenio);
  75. if (tls_setting) {
  76. int ret = hio_new_ssl_ctx(listenio, tls_setting);
  77. if (ret != 0) {
  78. hloge("new SSL_CTX failed: %d", ret);
  79. closesocket();
  80. return ret;
  81. }
  82. }
  83. }
  84. return 0;
  85. }
  86. int stopAccept() {
  87. if (listenfd < 0) return -1;
  88. hloop_t* loop = acceptor_loop->loop();
  89. if (loop == NULL) return -2;
  90. hio_t* listenio = hio_get(loop, listenfd);
  91. assert(listenio != NULL);
  92. return hio_del(listenio, HV_READ);
  93. }
  94. // start thread-safe
  95. void start(bool wait_threads_started = true) {
  96. if (worker_threads.threadNum() > 0) {
  97. worker_threads.start(wait_threads_started);
  98. }
  99. acceptor_loop->runInLoop(std::bind(&TcpServerEventLoopTmpl::startAccept, this));
  100. }
  101. // stop thread-safe
  102. void stop(bool wait_threads_stopped = true) {
  103. closesocket();
  104. if (worker_threads.threadNum() > 0) {
  105. worker_threads.stop(wait_threads_stopped);
  106. }
  107. }
  108. int withTLS(hssl_ctx_opt_t* opt = NULL) {
  109. tls = true;
  110. if (opt) {
  111. if (tls_setting == NULL) {
  112. HV_ALLOC_SIZEOF(tls_setting);
  113. }
  114. opt->endpoint = HSSL_SERVER;
  115. *tls_setting = *opt;
  116. }
  117. return 0;
  118. }
  119. void setUnpack(unpack_setting_t* setting) {
  120. if (setting == NULL) {
  121. HV_FREE(unpack_setting);
  122. return;
  123. }
  124. if (unpack_setting == NULL) {
  125. HV_ALLOC_SIZEOF(unpack_setting);
  126. }
  127. *unpack_setting = *setting;
  128. }
  129. // channel
  130. const TSocketChannelPtr& addChannel(hio_t* io) {
  131. uint32_t id = hio_id(io);
  132. auto channel = TSocketChannelPtr(new TSocketChannel(io));
  133. std::lock_guard<std::mutex> locker(mutex_);
  134. channels[id] = channel;
  135. return channels[id];
  136. }
  137. TSocketChannelPtr getChannelById(uint32_t id) {
  138. std::lock_guard<std::mutex> locker(mutex_);
  139. auto iter = channels.find(id);
  140. return iter != channels.end() ? iter->second : NULL;
  141. }
  142. void removeChannel(const TSocketChannelPtr& channel) {
  143. uint32_t id = channel->id();
  144. std::lock_guard<std::mutex> locker(mutex_);
  145. channels.erase(id);
  146. }
  147. size_t connectionNum() {
  148. std::lock_guard<std::mutex> locker(mutex_);
  149. return channels.size();
  150. }
  151. int foreachChannel(std::function<void(const TSocketChannelPtr& channel)> fn) {
  152. std::lock_guard<std::mutex> locker(mutex_);
  153. for (auto& pair : channels) {
  154. fn(pair.second);
  155. }
  156. return channels.size();
  157. }
  158. // broadcast thread-safe
  159. int broadcast(const void* data, int size) {
  160. return foreachChannel([data, size](const TSocketChannelPtr& channel) {
  161. channel->write(data, size);
  162. });
  163. }
  164. int broadcast(const std::string& str) {
  165. return broadcast(str.data(), str.size());
  166. }
  167. private:
  168. static void newConnEvent(hio_t* connio) {
  169. TcpServerEventLoopTmpl* server = (TcpServerEventLoopTmpl*)hevent_userdata(connio);
  170. if (server->connectionNum() >= server->max_connections) {
  171. hlogw("over max_connections");
  172. hio_close(connio);
  173. return;
  174. }
  175. // NOTE: attach to worker loop
  176. EventLoop* worker_loop = currentThreadEventLoop;
  177. assert(worker_loop != NULL);
  178. hio_attach(worker_loop->loop(), connio);
  179. const TSocketChannelPtr& channel = server->addChannel(connio);
  180. channel->status = SocketChannel::CONNECTED;
  181. channel->onread = [server, &channel](Buffer* buf) {
  182. if (server->onMessage) {
  183. server->onMessage(channel, buf);
  184. }
  185. };
  186. channel->onwrite = [server, &channel](Buffer* buf) {
  187. if (server->onWriteComplete) {
  188. server->onWriteComplete(channel, buf);
  189. }
  190. };
  191. channel->onclose = [server, &channel]() {
  192. EventLoop* worker_loop = currentThreadEventLoop;
  193. assert(worker_loop != NULL);
  194. --worker_loop->connectionNum;
  195. channel->status = SocketChannel::CLOSED;
  196. if (server->onConnection) {
  197. server->onConnection(channel);
  198. }
  199. server->removeChannel(channel);
  200. // NOTE: After removeChannel, channel may be destroyed,
  201. // so in this lambda function, no code should be added below.
  202. };
  203. if (server->unpack_setting) {
  204. channel->setUnpack(server->unpack_setting);
  205. }
  206. channel->startRead();
  207. if (server->onConnection) {
  208. server->onConnection(channel);
  209. }
  210. }
  211. static void onAccept(hio_t* connio) {
  212. TcpServerEventLoopTmpl* server = (TcpServerEventLoopTmpl*)hevent_userdata(connio);
  213. // NOTE: detach from acceptor loop
  214. hio_detach(connio);
  215. EventLoopPtr worker_loop = server->worker_threads.nextLoop(server->load_balance);
  216. if (worker_loop == NULL) {
  217. worker_loop = server->acceptor_loop;
  218. }
  219. ++worker_loop->connectionNum;
  220. worker_loop->runInLoop(std::bind(&TcpServerEventLoopTmpl::newConnEvent, connio));
  221. }
  222. public:
  223. std::string host;
  224. int port;
  225. int listenfd;
  226. bool tls;
  227. hssl_ctx_opt_t* tls_setting;
  228. unpack_setting_t* unpack_setting;
  229. // Callback
  230. std::function<void(const TSocketChannelPtr&)> onConnection;
  231. std::function<void(const TSocketChannelPtr&, Buffer*)> onMessage;
  232. // NOTE: Use Channel::isWriteComplete in onWriteComplete callback to determine whether all data has been written.
  233. std::function<void(const TSocketChannelPtr&, Buffer*)> onWriteComplete;
  234. uint32_t max_connections;
  235. load_balance_e load_balance;
  236. private:
  237. // id => TSocketChannelPtr
  238. std::map<uint32_t, TSocketChannelPtr> channels; // GUAREDE_BY(mutex_)
  239. std::mutex mutex_;
  240. EventLoopPtr acceptor_loop;
  241. EventLoopThreadPool worker_threads;
  242. };
  243. template<class TSocketChannel = SocketChannel>
  244. class TcpServerTmpl : private EventLoopThread, public TcpServerEventLoopTmpl<TSocketChannel> {
  245. public:
  246. TcpServerTmpl(EventLoopPtr loop = NULL)
  247. : EventLoopThread(loop)
  248. , TcpServerEventLoopTmpl<TSocketChannel>(EventLoopThread::loop())
  249. , is_loop_owner(loop == NULL)
  250. {}
  251. virtual ~TcpServerTmpl() {
  252. stop(true);
  253. }
  254. const EventLoopPtr& loop(int idx = -1) {
  255. return TcpServerEventLoopTmpl<TSocketChannel>::loop(idx);
  256. }
  257. // start thread-safe
  258. void start(bool wait_threads_started = true) {
  259. TcpServerEventLoopTmpl<TSocketChannel>::start(wait_threads_started);
  260. EventLoopThread::start(wait_threads_started);
  261. }
  262. // stop thread-safe
  263. void stop(bool wait_threads_stopped = true) {
  264. if (is_loop_owner) {
  265. EventLoopThread::stop(wait_threads_stopped);
  266. }
  267. TcpServerEventLoopTmpl<TSocketChannel>::stop(wait_threads_stopped);
  268. }
  269. private:
  270. bool is_loop_owner;
  271. };
  272. typedef TcpServerTmpl<SocketChannel> TcpServer;
} // namespace hv

#endif // HV_TCP_SERVER_HPP_