// TcpServer.h
  1. #ifndef HV_TCP_SERVER_HPP_
  2. #define HV_TCP_SERVER_HPP_
  3. #include "hsocket.h"
  4. #include "hssl.h"
  5. #include "hlog.h"
  6. #include "EventLoopThreadPool.h"
  7. #include "Channel.h"
  8. namespace hv {
  9. template<class TSocketChannel = SocketChannel>
  10. class TcpServerEventLoopTmpl {
  11. public:
  12. typedef std::shared_ptr<TSocketChannel> TSocketChannelPtr;
  13. TcpServerEventLoopTmpl(EventLoopPtr loop = NULL) {
  14. acceptor_loop = loop ? loop : std::make_shared<EventLoop>();
  15. listenfd = -1;
  16. tls = false;
  17. unpack_setting.mode = UNPACK_MODE_NONE;
  18. max_connections = 0xFFFFFFFF;
  19. load_balance = LB_RoundRobin;
  20. }
  21. virtual ~TcpServerEventLoopTmpl() {
  22. }
  23. EventLoopPtr loop(int idx = -1) {
  24. return worker_threads.loop(idx);
  25. }
  26. //@retval >=0 listenfd, <0 error
  27. int createsocket(int port, const char* host = "0.0.0.0") {
  28. listenfd = Listen(port, host);
  29. if (listenfd < 0) return listenfd;
  30. this->host = host;
  31. this->port = port;
  32. return listenfd;
  33. }
  34. // closesocket thread-safe
  35. void closesocket() {
  36. if (listenfd >= 0) {
  37. hloop_t* loop = acceptor_loop->loop();
  38. if (loop) {
  39. hio_t* listenio = hio_get(loop, listenfd);
  40. assert(listenio != NULL);
  41. hio_close_async(listenio);
  42. }
  43. listenfd = -1;
  44. }
  45. }
  46. void setMaxConnectionNum(uint32_t num) {
  47. max_connections = num;
  48. }
  49. void setLoadBalance(load_balance_e lb) {
  50. load_balance = lb;
  51. }
  52. // NOTE: totalThreadNum = 1 acceptor_thread + N worker_threads (N can be 0)
  53. void setThreadNum(int num) {
  54. worker_threads.setThreadNum(num);
  55. }
  56. int startAccept() {
  57. if (listenfd < 0) {
  58. listenfd = createsocket(port, host.c_str());
  59. if (listenfd < 0) {
  60. hloge("createsocket %s:%d return %d!\n", host.c_str(), port, listenfd);
  61. return listenfd;
  62. }
  63. }
  64. hloop_t* loop = acceptor_loop->loop();
  65. if (loop == NULL) return -2;
  66. hio_t* listenio = haccept(loop, listenfd, onAccept);
  67. assert(listenio != NULL);
  68. hevent_set_userdata(listenio, this);
  69. if (tls) {
  70. hio_enable_ssl(listenio);
  71. }
  72. return 0;
  73. }
  74. int stopAccept() {
  75. if (listenfd < 0) return -1;
  76. hloop_t* loop = acceptor_loop->loop();
  77. if (loop == NULL) return -2;
  78. hio_t* listenio = hio_get(loop, listenfd);
  79. assert(listenio != NULL);
  80. return hio_del(listenio, HV_READ);
  81. }
  82. // start thread-safe
  83. void start(bool wait_threads_started = true) {
  84. if (worker_threads.threadNum() > 0) {
  85. worker_threads.start(wait_threads_started);
  86. }
  87. acceptor_loop->runInLoop(std::bind(&TcpServerEventLoopTmpl::startAccept, this));
  88. }
  89. // stop thread-safe
  90. void stop(bool wait_threads_stopped = true) {
  91. closesocket();
  92. if (worker_threads.threadNum() > 0) {
  93. worker_threads.stop(wait_threads_stopped);
  94. }
  95. }
  96. int withTLS(hssl_ctx_opt_t* opt = NULL) {
  97. tls = true;
  98. if (opt) {
  99. opt->endpoint = HSSL_SERVER;
  100. if (hssl_ctx_init(opt) == NULL) {
  101. fprintf(stderr, "hssl_ctx_init failed!\n");
  102. return -1;
  103. }
  104. }
  105. return 0;
  106. }
  107. void setUnpack(unpack_setting_t* setting) {
  108. if (setting) {
  109. unpack_setting = *setting;
  110. } else {
  111. unpack_setting.mode = UNPACK_MODE_NONE;
  112. }
  113. }
  114. // channel
  115. const TSocketChannelPtr& addChannel(hio_t* io) {
  116. uint32_t id = hio_id(io);
  117. auto channel = TSocketChannelPtr(new TSocketChannel(io));
  118. std::lock_guard<std::mutex> locker(mutex_);
  119. channels[id] = channel;
  120. return channels[id];
  121. }
  122. TSocketChannelPtr getChannelById(uint32_t id) {
  123. std::lock_guard<std::mutex> locker(mutex_);
  124. auto iter = channels.find(id);
  125. return iter != channels.end() ? iter->second : NULL;
  126. }
  127. void removeChannel(const TSocketChannelPtr& channel) {
  128. uint32_t id = channel->id();
  129. std::lock_guard<std::mutex> locker(mutex_);
  130. channels.erase(id);
  131. }
  132. size_t connectionNum() {
  133. std::lock_guard<std::mutex> locker(mutex_);
  134. return channels.size();
  135. }
  136. int foreachChannel(std::function<void(const TSocketChannelPtr& channel)> fn) {
  137. std::lock_guard<std::mutex> locker(mutex_);
  138. for (auto& pair : channels) {
  139. fn(pair.second);
  140. }
  141. return channels.size();
  142. }
  143. // broadcast thread-safe
  144. int broadcast(const void* data, int size) {
  145. return foreachChannel([data, size](const TSocketChannelPtr& channel) {
  146. channel->write(data, size);
  147. });
  148. }
  149. int broadcast(const std::string& str) {
  150. return broadcast(str.data(), str.size());
  151. }
  152. private:
  153. static void newConnEvent(hio_t* connio) {
  154. TcpServerEventLoopTmpl* server = (TcpServerEventLoopTmpl*)hevent_userdata(connio);
  155. if (server->connectionNum() >= server->max_connections) {
  156. hlogw("over max_connections");
  157. hio_close(connio);
  158. return;
  159. }
  160. // NOTE: attach to worker loop
  161. EventLoop* worker_loop = currentThreadEventLoop;
  162. assert(worker_loop != NULL);
  163. hio_attach(worker_loop->loop(), connio);
  164. const TSocketChannelPtr& channel = server->addChannel(connio);
  165. channel->status = SocketChannel::CONNECTED;
  166. channel->onread = [server, &channel](Buffer* buf) {
  167. if (server->onMessage) {
  168. server->onMessage(channel, buf);
  169. }
  170. };
  171. channel->onwrite = [server, &channel](Buffer* buf) {
  172. if (server->onWriteComplete) {
  173. server->onWriteComplete(channel, buf);
  174. }
  175. };
  176. channel->onclose = [server, &channel]() {
  177. EventLoop* worker_loop = currentThreadEventLoop;
  178. assert(worker_loop != NULL);
  179. --worker_loop->connectionNum;
  180. channel->status = SocketChannel::CLOSED;
  181. if (server->onConnection) {
  182. server->onConnection(channel);
  183. }
  184. server->removeChannel(channel);
  185. // NOTE: After removeChannel, channel may be destroyed,
  186. // so in this lambda function, no code should be added below.
  187. };
  188. if (server->unpack_setting.mode != UNPACK_MODE_NONE) {
  189. channel->setUnpack(&server->unpack_setting);
  190. }
  191. channel->startRead();
  192. if (server->onConnection) {
  193. server->onConnection(channel);
  194. }
  195. }
  196. static void onAccept(hio_t* connio) {
  197. TcpServerEventLoopTmpl* server = (TcpServerEventLoopTmpl*)hevent_userdata(connio);
  198. // NOTE: detach from acceptor loop
  199. hio_detach(connio);
  200. EventLoopPtr worker_loop = server->worker_threads.nextLoop(server->load_balance);
  201. if (worker_loop == NULL) {
  202. worker_loop = server->acceptor_loop;
  203. }
  204. ++worker_loop->connectionNum;
  205. worker_loop->runInLoop(std::bind(&TcpServerEventLoopTmpl::newConnEvent, connio));
  206. }
  207. public:
  208. std::string host;
  209. int port;
  210. int listenfd;
  211. bool tls;
  212. unpack_setting_t unpack_setting;
  213. // Callback
  214. std::function<void(const TSocketChannelPtr&)> onConnection;
  215. std::function<void(const TSocketChannelPtr&, Buffer*)> onMessage;
  216. // NOTE: Use Channel::isWriteComplete in onWriteComplete callback to determine whether all data has been written.
  217. std::function<void(const TSocketChannelPtr&, Buffer*)> onWriteComplete;
  218. uint32_t max_connections;
  219. load_balance_e load_balance;
  220. private:
  221. // id => TSocketChannelPtr
  222. std::map<uint32_t, TSocketChannelPtr> channels; // GUAREDE_BY(mutex_)
  223. std::mutex mutex_;
  224. EventLoopPtr acceptor_loop;
  225. EventLoopThreadPool worker_threads;
  226. };
  227. template<class TSocketChannel = SocketChannel>
  228. class TcpServerTmpl : private EventLoopThread, public TcpServerEventLoopTmpl<TSocketChannel> {
  229. public:
  230. TcpServerTmpl(EventLoopPtr loop = NULL)
  231. : EventLoopThread(loop)
  232. , TcpServerEventLoopTmpl<TSocketChannel>(EventLoopThread::loop())
  233. {}
  234. virtual ~TcpServerTmpl() {
  235. stop(true);
  236. }
  237. const EventLoopPtr& loop(int idx = -1) {
  238. return TcpServerEventLoopTmpl<TSocketChannel>::loop(idx);
  239. }
  240. // start thread-safe
  241. void start(bool wait_threads_started = true) {
  242. TcpServerEventLoopTmpl<TSocketChannel>::start(wait_threads_started);
  243. EventLoopThread::start(wait_threads_started);
  244. }
  245. // stop thread-safe
  246. void stop(bool wait_threads_stopped = true) {
  247. EventLoopThread::stop(wait_threads_stopped);
  248. TcpServerEventLoopTmpl<TSocketChannel>::stop(wait_threads_stopped);
  249. }
  250. };
  251. typedef TcpServerTmpl<SocketChannel> TcpServer;
  252. }
  253. #endif // HV_TCP_SERVER_HPP_