//=====================================================================
//
// KCP - A Better ARQ Protocol Implementation
// skywind3000 (at) gmail.com, 2010-2011
//
// Features:
// + Average RTT reduced by 30%-40% compared with traditional ARQ such as TCP.
// + Maximum RTT reduced to about one third of TCP's.
// + Lightweight, distributed as a single source file.
//
//=====================================================================
#include "ikcp.h"

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <assert.h>     /* assert() is used below; ikcp.h typically pulls it in as well */

//=====================================================================
// KCP BASIC
//=====================================================================
const IUINT32 IKCP_RTO_NDL = 30;        // no delay min rto
const IUINT32 IKCP_RTO_MIN = 100;       // normal min rto
const IUINT32 IKCP_RTO_DEF = 200;
const IUINT32 IKCP_RTO_MAX = 60000;
const IUINT32 IKCP_CMD_PUSH = 81;       // cmd: push data
const IUINT32 IKCP_CMD_ACK = 82;        // cmd: ack
const IUINT32 IKCP_CMD_WASK = 83;       // cmd: window probe (ask)
const IUINT32 IKCP_CMD_WINS = 84;       // cmd: window size (tell)
const IUINT32 IKCP_ASK_SEND = 1;        // need to send IKCP_CMD_WASK
const IUINT32 IKCP_ASK_TELL = 2;        // need to send IKCP_CMD_WINS
const IUINT32 IKCP_WND_SND = 32;
const IUINT32 IKCP_WND_RCV = 128;       // must >= max fragment size
const IUINT32 IKCP_MTU_DEF = 1400;
const IUINT32 IKCP_ACK_FAST = 3;
const IUINT32 IKCP_INTERVAL = 100;
const IUINT32 IKCP_OVERHEAD = 24;
const IUINT32 IKCP_DEADLINK = 20;
const IUINT32 IKCP_THRESH_INIT = 2;
const IUINT32 IKCP_THRESH_MIN = 2;
const IUINT32 IKCP_PROBE_INIT = 7000;       // 7 secs to probe window size
const IUINT32 IKCP_PROBE_LIMIT = 120000;    // up to 120 secs to probe window
const IUINT32 IKCP_FASTACK_LIMIT = 5;       // max times to trigger fastack


//---------------------------------------------------------------------
// encode / decode
//---------------------------------------------------------------------
/* encode 8 bits unsigned int */
static inline char *ikcp_encode8u(char *p, unsigned char c)
{
    *(unsigned char*)p++ = c;
    return p;
}

/* decode 8 bits unsigned int */
static inline const char *ikcp_decode8u(const char *p, unsigned char *c)
{
    *c = *(unsigned char*)p++;
    return p;
}

/* encode 16 bits unsigned int (lsb) */
static inline char *ikcp_encode16u(char *p, unsigned short w)
{
#if IWORDS_BIG_ENDIAN || IWORDS_MUST_ALIGN
    *(unsigned char*)(p + 0) = (w & 255);
    *(unsigned char*)(p + 1) = (w >> 8);
#else
    memcpy(p, &w, 2);
#endif
    p += 2;
    return p;
}

/* decode 16 bits unsigned int (lsb) */
static inline const char *ikcp_decode16u(const char *p, unsigned short *w)
{
#if IWORDS_BIG_ENDIAN || IWORDS_MUST_ALIGN
    *w = *(const unsigned char*)(p + 1);
    *w = *(const unsigned char*)(p + 0) + (*w << 8);
#else
    memcpy(w, p, 2);
#endif
    p += 2;
    return p;
}

/* encode 32 bits unsigned int (lsb) */
static inline char *ikcp_encode32u(char *p, IUINT32 l)
{
#if IWORDS_BIG_ENDIAN || IWORDS_MUST_ALIGN
    *(unsigned char*)(p + 0) = (unsigned char)((l >> 0) & 0xff);
    *(unsigned char*)(p + 1) = (unsigned char)((l >> 8) & 0xff);
    *(unsigned char*)(p + 2) = (unsigned char)((l >> 16) & 0xff);
    *(unsigned char*)(p + 3) = (unsigned char)((l >> 24) & 0xff);
#else
    memcpy(p, &l, 4);
#endif
    p += 4;
    return p;
}

/* decode 32 bits unsigned int (lsb) */
static inline const char *ikcp_decode32u(const char *p, IUINT32 *l)
{
#if IWORDS_BIG_ENDIAN || IWORDS_MUST_ALIGN
    *l = *(const unsigned char*)(p + 3);
    *l = *(const unsigned char*)(p + 2) + (*l << 8);
    *l = *(const unsigned char*)(p + 1) + (*l << 8);
    *l = *(const unsigned char*)(p + 0) + (*l << 8);
#else
    memcpy(l, p, 4);
#endif
    p += 4;
    return p;
}
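
/*
 * Illustration only (not part of the library): the helpers above write
 * multi-byte fields in little-endian order regardless of host endianness,
 * so an encoded KCP header can be decoded on any platform. A minimal
 * round-trip sketch, guarded out of compilation:
 */
#if 0
static void ikcp_codec_example(void)
{
    char buf[8], *p = buf;
    const char *q = buf;
    IUINT32 sn_in = 0x11223344, sn_out = 0;
    unsigned short wnd_in = 128, wnd_out = 0;
    p = ikcp_encode32u(p, sn_in);       /* bytes on the wire: 44 33 22 11 */
    p = ikcp_encode16u(p, wnd_in);      /* bytes on the wire: 80 00 */
    q = ikcp_decode32u(q, &sn_out);
    q = ikcp_decode16u(q, &wnd_out);
    assert(sn_out == sn_in && wnd_out == wnd_in);
}
#endif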
static inline IUINT32 _imin_(IUINT32 a, IUINT32 b) {
    return a <= b ? a : b;
}

static inline IUINT32 _imax_(IUINT32 a, IUINT32 b) {
    return a >= b ? a : b;
}

static inline IUINT32 _ibound_(IUINT32 lower, IUINT32 middle, IUINT32 upper)
{
    return _imin_(_imax_(lower, middle), upper);
}

static inline long _itimediff(IUINT32 later, IUINT32 earlier)
{
    return ((IINT32)(later - earlier));
}
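
/*
 * Note (illustration, not in the original source): _itimediff relies on
 * unsigned wrap-around, so comparisons of timestamps and sequence numbers
 * stay correct when the 32-bit counters overflow. For example, with
 * later = 0x00000001 and earlier = 0xFFFFFFFF the unsigned difference is 2,
 * and casting it to IINT32 yields +2 ("later" really is later), whereas a
 * naive comparison of the raw values would conclude the opposite.
 */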

//---------------------------------------------------------------------
// manage segment
//---------------------------------------------------------------------
typedef struct IKCPSEG IKCPSEG;

static void* (*ikcp_malloc_hook)(size_t) = NULL;
static void (*ikcp_free_hook)(void *) = NULL;

// internal malloc
static void* ikcp_malloc(size_t size) {
    if (ikcp_malloc_hook)
        return ikcp_malloc_hook(size);
    return malloc(size);
}

// internal free
static void ikcp_free(void *ptr) {
    if (ikcp_free_hook) {
        ikcp_free_hook(ptr);
    } else {
        free(ptr);
    }
}

// redefine allocator
void ikcp_allocator(void* (*new_malloc)(size_t), void (*new_free)(void*))
{
    ikcp_malloc_hook = new_malloc;
    ikcp_free_hook = new_free;
}
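
/*
 * Illustration only: ikcp_allocator() swaps the allocator used for every
 * segment and control block. A sketch that routes allocations through
 * hypothetical pool functions (my_pool_alloc/my_pool_free are assumptions,
 * not part of KCP), guarded out of compilation:
 */
#if 0
extern void *my_pool_alloc(size_t size);    /* assumed user-provided */
extern void my_pool_free(void *ptr);        /* assumed user-provided */

static void install_pool_allocator(void)
{
    /* call before any ikcp_create() so every object shares one allocator */
    ikcp_allocator(my_pool_alloc, my_pool_free);
}
#endif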

// allocate a new kcp segment
static IKCPSEG* ikcp_segment_new(ikcpcb *kcp, int size)
{
    return (IKCPSEG*)ikcp_malloc(sizeof(IKCPSEG) + size);
}

// delete a segment
static void ikcp_segment_delete(ikcpcb *kcp, IKCPSEG *seg)
{
    ikcp_free(seg);
}

// write log
void ikcp_log(ikcpcb *kcp, int mask, const char *fmt, ...)
{
    char buffer[1024];
    va_list argptr;
    if ((mask & kcp->logmask) == 0 || kcp->writelog == 0) return;
    va_start(argptr, fmt);
    // bound the formatted output to the local buffer to avoid overflow
    vsnprintf(buffer, sizeof(buffer), fmt, argptr);
    va_end(argptr);
    kcp->writelog(buffer, kcp, kcp->user);
}

// check log mask
static int ikcp_canlog(const ikcpcb *kcp, int mask)
{
    if ((mask & kcp->logmask) == 0 || kcp->writelog == NULL) return 0;
    return 1;
}

// output segment
static int ikcp_output(ikcpcb *kcp, const void *data, int size)
{
    assert(kcp);
    assert(kcp->output);
    if (ikcp_canlog(kcp, IKCP_LOG_OUTPUT)) {
        ikcp_log(kcp, IKCP_LOG_OUTPUT, "[RO] %ld bytes", (long)size);
    }
    if (size == 0) return 0;
    return kcp->output((const char*)data, size, kcp, kcp->user);
}

// output queue
void ikcp_qprint(const char *name, const struct IQUEUEHEAD *head)
{
#if 0
    const struct IQUEUEHEAD *p;
    printf("<%s>: [", name);
    for (p = head->next; p != head; p = p->next) {
        const IKCPSEG *seg = iqueue_entry(p, const IKCPSEG, node);
        printf("(%lu %d)", (unsigned long)seg->sn, (int)(seg->ts % 10000));
        if (p->next != head) printf(",");
    }
    printf("]\n");
#endif
}

//---------------------------------------------------------------------
// create a new kcpcb
//---------------------------------------------------------------------
ikcpcb* ikcp_create(IUINT32 conv, void *user)
{
    ikcpcb *kcp = (ikcpcb*)ikcp_malloc(sizeof(struct IKCPCB));
    if (kcp == NULL) return NULL;
    kcp->conv = conv;
    kcp->user = user;
    kcp->snd_una = 0;
    kcp->snd_nxt = 0;
    kcp->rcv_nxt = 0;
    kcp->ts_recent = 0;
    kcp->ts_lastack = 0;
    kcp->ts_probe = 0;
    kcp->probe_wait = 0;
    kcp->snd_wnd = IKCP_WND_SND;
    kcp->rcv_wnd = IKCP_WND_RCV;
    kcp->rmt_wnd = IKCP_WND_RCV;
    kcp->cwnd = 0;
    kcp->incr = 0;
    kcp->probe = 0;
    kcp->mtu = IKCP_MTU_DEF;
    kcp->mss = kcp->mtu - IKCP_OVERHEAD;
    kcp->stream = 0;

    kcp->buffer = (char*)ikcp_malloc((kcp->mtu + IKCP_OVERHEAD) * 3);
    if (kcp->buffer == NULL) {
        ikcp_free(kcp);
        return NULL;
    }

    iqueue_init(&kcp->snd_queue);
    iqueue_init(&kcp->rcv_queue);
    iqueue_init(&kcp->snd_buf);
    iqueue_init(&kcp->rcv_buf);
    kcp->nrcv_buf = 0;
    kcp->nsnd_buf = 0;
    kcp->nrcv_que = 0;
    kcp->nsnd_que = 0;
    kcp->state = 0;
    kcp->acklist = NULL;
    kcp->ackblock = 0;
    kcp->ackcount = 0;
    kcp->rx_srtt = 0;
    kcp->rx_rttval = 0;
    kcp->rx_rto = IKCP_RTO_DEF;
    kcp->rx_minrto = IKCP_RTO_MIN;
    kcp->current = 0;
    kcp->interval = IKCP_INTERVAL;
    kcp->ts_flush = IKCP_INTERVAL;
    kcp->nodelay = 0;
    kcp->updated = 0;
    kcp->logmask = 0;
    kcp->ssthresh = IKCP_THRESH_INIT;
    kcp->fastresend = 0;
    kcp->fastlimit = IKCP_FASTACK_LIMIT;
    kcp->nocwnd = 0;
    kcp->xmit = 0;
    kcp->dead_link = IKCP_DEADLINK;
    kcp->output = NULL;
    kcp->writelog = NULL;

    return kcp;
}
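
/*
 * Illustration only (not part of the library): a typical session wires a
 * kcpcb to a UDP socket and drives it from a clock; both peers must use
 * the same 'conv' value. The names udp_output, clock_ms() and the
 * non-blocking socket below are assumptions for the sketch, which is
 * guarded out of compilation:
 */
#if 0
static int udp_output(const char *buf, int len, ikcpcb *kcp, void *user)
{
    /* 'user' carries whatever was passed to ikcp_create; here a socket fd */
    int fd = *(int*)user;
    return (int)send(fd, buf, len, 0);      /* assumes a connected UDP socket */
}

static void session_sketch(int udp_socket)
{
    char buf[2048];
    int n;
    ikcpcb *kcp = ikcp_create(0x11223344, &udp_socket);
    ikcp_setoutput(kcp, udp_output);
    while (1) {
        ikcp_update(kcp, clock_ms());                   /* clock_ms(): assumed millisecond clock */
        n = (int)recv(udp_socket, buf, sizeof(buf), 0); /* assumed non-blocking socket */
        if (n > 0) ikcp_input(kcp, buf, n);             /* feed raw datagrams in */
        while ((n = ikcp_recv(kcp, buf, sizeof(buf))) > 0) {
            /* 'n' bytes of reassembled application data are now in buf */
        }
        ikcp_send(kcp, "hello", 5);                     /* queue application data out */
        /* sleep ~10 ms, or until ikcp_check(kcp, clock_ms()) */
    }
    ikcp_release(kcp);
}
#endif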

//---------------------------------------------------------------------
// release a kcpcb
//---------------------------------------------------------------------
void ikcp_release(ikcpcb *kcp)
{
    assert(kcp);
    if (kcp) {
        IKCPSEG *seg;
        while (!iqueue_is_empty(&kcp->snd_buf)) {
            seg = iqueue_entry(kcp->snd_buf.next, IKCPSEG, node);
            iqueue_del(&seg->node);
            ikcp_segment_delete(kcp, seg);
        }
        while (!iqueue_is_empty(&kcp->rcv_buf)) {
            seg = iqueue_entry(kcp->rcv_buf.next, IKCPSEG, node);
            iqueue_del(&seg->node);
            ikcp_segment_delete(kcp, seg);
        }
        while (!iqueue_is_empty(&kcp->snd_queue)) {
            seg = iqueue_entry(kcp->snd_queue.next, IKCPSEG, node);
            iqueue_del(&seg->node);
            ikcp_segment_delete(kcp, seg);
        }
        while (!iqueue_is_empty(&kcp->rcv_queue)) {
            seg = iqueue_entry(kcp->rcv_queue.next, IKCPSEG, node);
            iqueue_del(&seg->node);
            ikcp_segment_delete(kcp, seg);
        }
        if (kcp->buffer) {
            ikcp_free(kcp->buffer);
        }
        if (kcp->acklist) {
            ikcp_free(kcp->acklist);
        }

        kcp->nrcv_buf = 0;
        kcp->nsnd_buf = 0;
        kcp->nrcv_que = 0;
        kcp->nsnd_que = 0;
        kcp->ackcount = 0;
        kcp->buffer = NULL;
        kcp->acklist = NULL;

        ikcp_free(kcp);
    }
}

//---------------------------------------------------------------------
// set output callback, which will be invoked by kcp
//---------------------------------------------------------------------
void ikcp_setoutput(ikcpcb *kcp, int (*output)(const char *buf, int len,
    ikcpcb *kcp, void *user))
{
    kcp->output = output;
}

//---------------------------------------------------------------------
// user/upper level recv: returns size, returns below zero for EAGAIN
//---------------------------------------------------------------------
int ikcp_recv(ikcpcb *kcp, char *buffer, int len)
{
    struct IQUEUEHEAD *p;
    int ispeek = (len < 0)? 1 : 0;
    int peeksize;
    int recover = 0;
    IKCPSEG *seg;
    assert(kcp);

    if (iqueue_is_empty(&kcp->rcv_queue))
        return -1;

    if (len < 0) len = -len;

    peeksize = ikcp_peeksize(kcp);

    if (peeksize < 0)
        return -2;

    if (peeksize > len)
        return -3;

    if (kcp->nrcv_que >= kcp->rcv_wnd)
        recover = 1;

    // merge fragment
    for (len = 0, p = kcp->rcv_queue.next; p != &kcp->rcv_queue; ) {
        int fragment;
        seg = iqueue_entry(p, IKCPSEG, node);
        p = p->next;

        if (buffer) {
            memcpy(buffer, seg->data, seg->len);
            buffer += seg->len;
        }

        len += seg->len;
        fragment = seg->frg;

        if (ikcp_canlog(kcp, IKCP_LOG_RECV)) {
            ikcp_log(kcp, IKCP_LOG_RECV, "recv sn=%lu", (unsigned long)seg->sn);
        }

        if (ispeek == 0) {
            iqueue_del(&seg->node);
            ikcp_segment_delete(kcp, seg);
            kcp->nrcv_que--;
        }

        if (fragment == 0)
            break;
    }

    assert(len == peeksize);

    // move available data from rcv_buf -> rcv_queue
    while (! iqueue_is_empty(&kcp->rcv_buf)) {
        seg = iqueue_entry(kcp->rcv_buf.next, IKCPSEG, node);
        if (seg->sn == kcp->rcv_nxt && kcp->nrcv_que < kcp->rcv_wnd) {
            iqueue_del(&seg->node);
            kcp->nrcv_buf--;
            iqueue_add_tail(&seg->node, &kcp->rcv_queue);
            kcp->nrcv_que++;
            kcp->rcv_nxt++;
        } else {
            break;
        }
    }

    // fast recover
    if (kcp->nrcv_que < kcp->rcv_wnd && recover) {
        // ready to send back IKCP_CMD_WINS in ikcp_flush
        // tell remote my window size
        kcp->probe |= IKCP_ASK_TELL;
    }

    return len;
}
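
/*
 * Illustration only: passing a negative length makes ikcp_recv peek, i.e.
 * copy up to |len| bytes without consuming the message; ikcp_peeksize()
 * reports the full size of the next message so a caller can size its
 * buffer first. A sketch, guarded out of compilation:
 */
#if 0
static int recv_one_message(ikcpcb *kcp, char *buf, int bufsize)
{
    int size = ikcp_peeksize(kcp);          /* full size of the next message, or <0 */
    if (size < 0) return -1;                /* nothing complete yet */
    if (size > bufsize) return -3;          /* caller's buffer is too small */
    return ikcp_recv(kcp, buf, bufsize);    /* consume it; returns 'size' */
}
#endif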

//---------------------------------------------------------------------
// peek data size
//---------------------------------------------------------------------
int ikcp_peeksize(const ikcpcb *kcp)
{
    struct IQUEUEHEAD *p;
    IKCPSEG *seg;
    int length = 0;

    assert(kcp);

    if (iqueue_is_empty(&kcp->rcv_queue)) return -1;

    seg = iqueue_entry(kcp->rcv_queue.next, IKCPSEG, node);
    if (seg->frg == 0) return seg->len;

    if (kcp->nrcv_que < seg->frg + 1) return -1;

    for (p = kcp->rcv_queue.next; p != &kcp->rcv_queue; p = p->next) {
        seg = iqueue_entry(p, IKCPSEG, node);
        length += seg->len;
        if (seg->frg == 0) break;
    }

    return length;
}

//---------------------------------------------------------------------
// user/upper level send, returns below zero for error
//---------------------------------------------------------------------
int ikcp_send(ikcpcb *kcp, const char *buffer, int len)
{
    IKCPSEG *seg;
    int count, i;

    assert(kcp->mss > 0);
    if (len < 0) return -1;

    // append to previous segment in streaming mode (if possible)
    if (kcp->stream != 0) {
        if (!iqueue_is_empty(&kcp->snd_queue)) {
            IKCPSEG *old = iqueue_entry(kcp->snd_queue.prev, IKCPSEG, node);
            if (old->len < kcp->mss) {
                int capacity = kcp->mss - old->len;
                int extend = (len < capacity)? len : capacity;
                seg = ikcp_segment_new(kcp, old->len + extend);
                assert(seg);
                if (seg == NULL) {
                    return -2;
                }
                iqueue_add_tail(&seg->node, &kcp->snd_queue);
                memcpy(seg->data, old->data, old->len);
                if (buffer) {
                    memcpy(seg->data + old->len, buffer, extend);
                    buffer += extend;
                }
                seg->len = old->len + extend;
                seg->frg = 0;
                len -= extend;
                iqueue_del_init(&old->node);
                ikcp_segment_delete(kcp, old);
            }
        }
        if (len <= 0) {
            return 0;
        }
    }

    if (len <= (int)kcp->mss) count = 1;
    else count = (len + kcp->mss - 1) / kcp->mss;

    if (count >= (int)IKCP_WND_RCV) return -2;

    if (count == 0) count = 1;

    // fragment
    for (i = 0; i < count; i++) {
        int size = len > (int)kcp->mss ? (int)kcp->mss : len;
        seg = ikcp_segment_new(kcp, size);
        assert(seg);
        if (seg == NULL) {
            return -2;
        }
        if (buffer && len > 0) {
            memcpy(seg->data, buffer, size);
        }
        seg->len = size;
        seg->frg = (kcp->stream == 0)? (count - i - 1) : 0;
        iqueue_init(&seg->node);
        iqueue_add_tail(&seg->node, &kcp->snd_queue);
        kcp->nsnd_que++;
        if (buffer) {
            buffer += size;
        }
        len -= size;
    }

    return 0;
}
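
/*
 * Worked example (illustration, not in the original source): with the
 * default mtu of 1400, mss = 1400 - 24 = 1376 bytes of payload per segment.
 * Calling ikcp_send() with a 10000-byte message in message mode
 * (stream == 0) yields count = ceil(10000 / 1376) = 8 fragments, tagged
 * frg = 7,6,...,0 so the receiver knows the message ends at frg == 0.
 * Because count must stay below IKCP_WND_RCV (128), a single message is
 * limited to roughly 127 * mss bytes; anything larger is rejected with -2.
 */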

//---------------------------------------------------------------------
// parse ack
//---------------------------------------------------------------------
static void ikcp_update_ack(ikcpcb *kcp, IINT32 rtt)
{
    IINT32 rto = 0;
    if (kcp->rx_srtt == 0) {
        kcp->rx_srtt = rtt;
        kcp->rx_rttval = rtt / 2;
    } else {
        long delta = rtt - kcp->rx_srtt;
        if (delta < 0) delta = -delta;
        kcp->rx_rttval = (3 * kcp->rx_rttval + delta) / 4;
        kcp->rx_srtt = (7 * kcp->rx_srtt + rtt) / 8;
        if (kcp->rx_srtt < 1) kcp->rx_srtt = 1;
    }
    rto = kcp->rx_srtt + _imax_(kcp->interval, 4 * kcp->rx_rttval);
    kcp->rx_rto = _ibound_(kcp->rx_minrto, rto, IKCP_RTO_MAX);
}
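
/*
 * Worked example (illustration): this is the classic Jacobson/Karels
 * smoothing. Suppose rx_srtt = 100 ms, rx_rttval = 20 ms, interval = 100 ms
 * and a new sample rtt = 140 ms arrives:
 *   delta     = |140 - 100|       = 40
 *   rx_rttval = (3*20 + 40) / 4   = 25
 *   rx_srtt   = (7*100 + 140) / 8 = 105
 *   rto       = 105 + max(100, 4*25) = 205
 * which is then clamped to [rx_minrto, IKCP_RTO_MAX]; with the default
 * rx_minrto of 100 ms the result stays 205 ms.
 */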

static void ikcp_shrink_buf(ikcpcb *kcp)
{
    struct IQUEUEHEAD *p = kcp->snd_buf.next;
    if (p != &kcp->snd_buf) {
        IKCPSEG *seg = iqueue_entry(p, IKCPSEG, node);
        kcp->snd_una = seg->sn;
    } else {
        kcp->snd_una = kcp->snd_nxt;
    }
}

static void ikcp_parse_ack(ikcpcb *kcp, IUINT32 sn)
{
    struct IQUEUEHEAD *p, *next;

    if (_itimediff(sn, kcp->snd_una) < 0 || _itimediff(sn, kcp->snd_nxt) >= 0)
        return;

    for (p = kcp->snd_buf.next; p != &kcp->snd_buf; p = next) {
        IKCPSEG *seg = iqueue_entry(p, IKCPSEG, node);
        next = p->next;
        if (sn == seg->sn) {
            iqueue_del(p);
            ikcp_segment_delete(kcp, seg);
            kcp->nsnd_buf--;
            break;
        }
        if (_itimediff(sn, seg->sn) < 0) {
            break;
        }
    }
}

static void ikcp_parse_una(ikcpcb *kcp, IUINT32 una)
{
    struct IQUEUEHEAD *p, *next;
    for (p = kcp->snd_buf.next; p != &kcp->snd_buf; p = next) {
        IKCPSEG *seg = iqueue_entry(p, IKCPSEG, node);
        next = p->next;
        if (_itimediff(una, seg->sn) > 0) {
            iqueue_del(p);
            ikcp_segment_delete(kcp, seg);
            kcp->nsnd_buf--;
        } else {
            break;
        }
    }
}

static void ikcp_parse_fastack(ikcpcb *kcp, IUINT32 sn, IUINT32 ts)
{
    struct IQUEUEHEAD *p, *next;

    if (_itimediff(sn, kcp->snd_una) < 0 || _itimediff(sn, kcp->snd_nxt) >= 0)
        return;

    for (p = kcp->snd_buf.next; p != &kcp->snd_buf; p = next) {
        IKCPSEG *seg = iqueue_entry(p, IKCPSEG, node);
        next = p->next;
        if (_itimediff(sn, seg->sn) < 0) {
            break;
        }
        else if (sn != seg->sn) {
        #ifndef IKCP_FASTACK_CONSERVE
            seg->fastack++;
        #else
            if (_itimediff(ts, seg->ts) >= 0)
                seg->fastack++;
        #endif
        }
    }
}

//---------------------------------------------------------------------
// ack append
//---------------------------------------------------------------------
static void ikcp_ack_push(ikcpcb *kcp, IUINT32 sn, IUINT32 ts)
{
    IUINT32 newsize = kcp->ackcount + 1;
    IUINT32 *ptr;

    if (newsize > kcp->ackblock) {
        IUINT32 *acklist;
        IUINT32 newblock;

        for (newblock = 8; newblock < newsize; newblock <<= 1);
        acklist = (IUINT32*)ikcp_malloc(newblock * sizeof(IUINT32) * 2);

        if (acklist == NULL) {
            assert(acklist != NULL);
            abort();
        }

        if (kcp->acklist != NULL) {
            IUINT32 x;
            for (x = 0; x < kcp->ackcount; x++) {
                acklist[x * 2 + 0] = kcp->acklist[x * 2 + 0];
                acklist[x * 2 + 1] = kcp->acklist[x * 2 + 1];
            }
            ikcp_free(kcp->acklist);
        }

        kcp->acklist = acklist;
        kcp->ackblock = newblock;
    }

    ptr = &kcp->acklist[kcp->ackcount * 2];
    ptr[0] = sn;
    ptr[1] = ts;
    kcp->ackcount++;
}

static void ikcp_ack_get(const ikcpcb *kcp, int p, IUINT32 *sn, IUINT32 *ts)
{
    if (sn) sn[0] = kcp->acklist[p * 2 + 0];
    if (ts) ts[0] = kcp->acklist[p * 2 + 1];
}

//---------------------------------------------------------------------
// parse data
//---------------------------------------------------------------------
void ikcp_parse_data(ikcpcb *kcp, IKCPSEG *newseg)
{
    struct IQUEUEHEAD *p, *prev;
    IUINT32 sn = newseg->sn;
    int repeat = 0;

    if (_itimediff(sn, kcp->rcv_nxt + kcp->rcv_wnd) >= 0 ||
        _itimediff(sn, kcp->rcv_nxt) < 0) {
        ikcp_segment_delete(kcp, newseg);
        return;
    }

    for (p = kcp->rcv_buf.prev; p != &kcp->rcv_buf; p = prev) {
        IKCPSEG *seg = iqueue_entry(p, IKCPSEG, node);
        prev = p->prev;
        if (seg->sn == sn) {
            repeat = 1;
            break;
        }
        if (_itimediff(sn, seg->sn) > 0) {
            break;
        }
    }

    if (repeat == 0) {
        iqueue_init(&newseg->node);
        iqueue_add(&newseg->node, p);
        kcp->nrcv_buf++;
    } else {
        ikcp_segment_delete(kcp, newseg);
    }

#if 0
    ikcp_qprint("rcvbuf", &kcp->rcv_buf);
    printf("rcv_nxt=%lu\n", kcp->rcv_nxt);
#endif

    // move available data from rcv_buf -> rcv_queue
    while (! iqueue_is_empty(&kcp->rcv_buf)) {
        IKCPSEG *seg = iqueue_entry(kcp->rcv_buf.next, IKCPSEG, node);
        if (seg->sn == kcp->rcv_nxt && kcp->nrcv_que < kcp->rcv_wnd) {
            iqueue_del(&seg->node);
            kcp->nrcv_buf--;
            iqueue_add_tail(&seg->node, &kcp->rcv_queue);
            kcp->nrcv_que++;
            kcp->rcv_nxt++;
        } else {
            break;
        }
    }

#if 0
    ikcp_qprint("queue", &kcp->rcv_queue);
    printf("rcv_nxt=%lu\n", kcp->rcv_nxt);
#endif

#if 1
//  printf("snd(buf=%d, queue=%d)\n", kcp->nsnd_buf, kcp->nsnd_que);
//  printf("rcv(buf=%d, queue=%d)\n", kcp->nrcv_buf, kcp->nrcv_que);
#endif
}

//---------------------------------------------------------------------
// input data
//---------------------------------------------------------------------
int ikcp_input(ikcpcb *kcp, const char *data, long size)
{
    IUINT32 prev_una = kcp->snd_una;
    IUINT32 maxack = 0, latest_ts = 0;
    int flag = 0;

    if (ikcp_canlog(kcp, IKCP_LOG_INPUT)) {
        ikcp_log(kcp, IKCP_LOG_INPUT, "[RI] %d bytes", (int)size);
    }

    if (data == NULL || (int)size < (int)IKCP_OVERHEAD) return -1;

    while (1) {
        IUINT32 ts, sn, len, una, conv;
        IUINT16 wnd;
        IUINT8 cmd, frg;
        IKCPSEG *seg;

        if (size < (int)IKCP_OVERHEAD) break;

        data = ikcp_decode32u(data, &conv);
        if (conv != kcp->conv) return -1;

        data = ikcp_decode8u(data, &cmd);
        data = ikcp_decode8u(data, &frg);
        data = ikcp_decode16u(data, &wnd);
        data = ikcp_decode32u(data, &ts);
        data = ikcp_decode32u(data, &sn);
        data = ikcp_decode32u(data, &una);
        data = ikcp_decode32u(data, &len);

        size -= IKCP_OVERHEAD;

        if ((long)size < (long)len || (int)len < 0) return -2;

        if (cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
            cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS)
            return -3;

        kcp->rmt_wnd = wnd;
        ikcp_parse_una(kcp, una);
        ikcp_shrink_buf(kcp);

        if (cmd == IKCP_CMD_ACK) {
            if (_itimediff(kcp->current, ts) >= 0) {
                ikcp_update_ack(kcp, _itimediff(kcp->current, ts));
            }
            ikcp_parse_ack(kcp, sn);
            ikcp_shrink_buf(kcp);
            if (flag == 0) {
                flag = 1;
                maxack = sn;
                latest_ts = ts;
            } else {
                if (_itimediff(sn, maxack) > 0) {
                #ifndef IKCP_FASTACK_CONSERVE
                    maxack = sn;
                    latest_ts = ts;
                #else
                    if (_itimediff(ts, latest_ts) > 0) {
                        maxack = sn;
                        latest_ts = ts;
                    }
                #endif
                }
            }
            if (ikcp_canlog(kcp, IKCP_LOG_IN_ACK)) {
                ikcp_log(kcp, IKCP_LOG_IN_ACK,
                    "input ack: sn=%lu rtt=%ld rto=%ld", (unsigned long)sn,
                    (long)_itimediff(kcp->current, ts),
                    (long)kcp->rx_rto);
            }
        }
        else if (cmd == IKCP_CMD_PUSH) {
            if (ikcp_canlog(kcp, IKCP_LOG_IN_DATA)) {
                ikcp_log(kcp, IKCP_LOG_IN_DATA,
                    "input psh: sn=%lu ts=%lu", (unsigned long)sn, (unsigned long)ts);
            }
            if (_itimediff(sn, kcp->rcv_nxt + kcp->rcv_wnd) < 0) {
                ikcp_ack_push(kcp, sn, ts);
                if (_itimediff(sn, kcp->rcv_nxt) >= 0) {
                    seg = ikcp_segment_new(kcp, len);
                    seg->conv = conv;
                    seg->cmd = cmd;
                    seg->frg = frg;
                    seg->wnd = wnd;
                    seg->ts = ts;
                    seg->sn = sn;
                    seg->una = una;
                    seg->len = len;
                    if (len > 0) {
                        memcpy(seg->data, data, len);
                    }
                    ikcp_parse_data(kcp, seg);
                }
            }
        }
        else if (cmd == IKCP_CMD_WASK) {
            // ready to send back IKCP_CMD_WINS in ikcp_flush
            // tell remote my window size
            kcp->probe |= IKCP_ASK_TELL;
            if (ikcp_canlog(kcp, IKCP_LOG_IN_PROBE)) {
                ikcp_log(kcp, IKCP_LOG_IN_PROBE, "input probe");
            }
        }
        else if (cmd == IKCP_CMD_WINS) {
            // do nothing
            if (ikcp_canlog(kcp, IKCP_LOG_IN_WINS)) {
                ikcp_log(kcp, IKCP_LOG_IN_WINS,
                    "input wins: %lu", (unsigned long)(wnd));
            }
        }
        else {
            return -3;
        }

        data += len;
        size -= len;
    }

    if (flag != 0) {
        ikcp_parse_fastack(kcp, maxack, latest_ts);
    }

    if (_itimediff(kcp->snd_una, prev_una) > 0) {
        if (kcp->cwnd < kcp->rmt_wnd) {
            IUINT32 mss = kcp->mss;
            if (kcp->cwnd < kcp->ssthresh) {
                kcp->cwnd++;
                kcp->incr += mss;
            } else {
                if (kcp->incr < mss) kcp->incr = mss;
                kcp->incr += (mss * mss) / kcp->incr + (mss / 16);
                if ((kcp->cwnd + 1) * mss <= kcp->incr) {
                #if 1
                    kcp->cwnd = (kcp->incr + mss - 1) / ((mss > 0)? mss : 1);
                #else
                    kcp->cwnd++;
                #endif
                }
            }
            if (kcp->cwnd > kcp->rmt_wnd) {
                kcp->cwnd = kcp->rmt_wnd;
                kcp->incr = kcp->rmt_wnd * mss;
            }
        }
    }

    return 0;
}
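
/*
 * Note (illustration, not in the original source): the block above grows
 * the congestion window whenever new data has been acknowledged (snd_una
 * advanced). Below ssthresh it behaves like slow start, adding one segment
 * to cwnd per acknowledged batch; at or above ssthresh it switches to a
 * congestion-avoidance style increase of roughly mss*mss/incr bytes per
 * batch, i.e. about one extra segment per round trip. In all cases cwnd is
 * capped by the remote window rmt_wnd advertised in the last header seen.
 */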

//---------------------------------------------------------------------
// ikcp_encode_seg
//---------------------------------------------------------------------
static char *ikcp_encode_seg(char *ptr, const IKCPSEG *seg)
{
    ptr = ikcp_encode32u(ptr, seg->conv);
    ptr = ikcp_encode8u(ptr, (IUINT8)seg->cmd);
    ptr = ikcp_encode8u(ptr, (IUINT8)seg->frg);
    ptr = ikcp_encode16u(ptr, (IUINT16)seg->wnd);
    ptr = ikcp_encode32u(ptr, seg->ts);
    ptr = ikcp_encode32u(ptr, seg->sn);
    ptr = ikcp_encode32u(ptr, seg->una);
    ptr = ikcp_encode32u(ptr, seg->len);
    return ptr;
}

static int ikcp_wnd_unused(const ikcpcb *kcp)
{
    if (kcp->nrcv_que < kcp->rcv_wnd) {
        return kcp->rcv_wnd - kcp->nrcv_que;
    }
    return 0;
}

//---------------------------------------------------------------------
// ikcp_flush
//---------------------------------------------------------------------
void ikcp_flush(ikcpcb *kcp)
{
    IUINT32 current = kcp->current;
    char *buffer = kcp->buffer;
    char *ptr = buffer;
    int count, size, i;
    IUINT32 resent, cwnd;
    IUINT32 rtomin;
    struct IQUEUEHEAD *p;
    int change = 0;
    int lost = 0;
    IKCPSEG seg;

    // do nothing if 'ikcp_update' hasn't been called yet
    if (kcp->updated == 0) return;
    seg.conv = kcp->conv;
    seg.cmd = IKCP_CMD_ACK;
    seg.frg = 0;
    seg.wnd = ikcp_wnd_unused(kcp);
    seg.una = kcp->rcv_nxt;
    seg.len = 0;
    seg.sn = 0;
    seg.ts = 0;

    // flush acknowledges
    count = kcp->ackcount;
    for (i = 0; i < count; i++) {
        size = (int)(ptr - buffer);
        if (size + (int)IKCP_OVERHEAD > (int)kcp->mtu) {
            ikcp_output(kcp, buffer, size);
            ptr = buffer;
        }
        ikcp_ack_get(kcp, i, &seg.sn, &seg.ts);
        ptr = ikcp_encode_seg(ptr, &seg);
    }

    kcp->ackcount = 0;

    // probe window size (if remote window size equals zero)
    if (kcp->rmt_wnd == 0) {
        if (kcp->probe_wait == 0) {
            kcp->probe_wait = IKCP_PROBE_INIT;
            kcp->ts_probe = kcp->current + kcp->probe_wait;
        }
        else {
            if (_itimediff(kcp->current, kcp->ts_probe) >= 0) {
                if (kcp->probe_wait < IKCP_PROBE_INIT)
                    kcp->probe_wait = IKCP_PROBE_INIT;
                kcp->probe_wait += kcp->probe_wait / 2;
                if (kcp->probe_wait > IKCP_PROBE_LIMIT)
                    kcp->probe_wait = IKCP_PROBE_LIMIT;
                kcp->ts_probe = kcp->current + kcp->probe_wait;
                kcp->probe |= IKCP_ASK_SEND;
            }
        }
    } else {
        kcp->ts_probe = 0;
        kcp->probe_wait = 0;
    }

    // flush window probing commands
    if (kcp->probe & IKCP_ASK_SEND) {
        seg.cmd = IKCP_CMD_WASK;
        size = (int)(ptr - buffer);
        if (size + (int)IKCP_OVERHEAD > (int)kcp->mtu) {
            ikcp_output(kcp, buffer, size);
            ptr = buffer;
        }
        ptr = ikcp_encode_seg(ptr, &seg);
    }

    // flush window probing commands
    if (kcp->probe & IKCP_ASK_TELL) {
        seg.cmd = IKCP_CMD_WINS;
        size = (int)(ptr - buffer);
        if (size + (int)IKCP_OVERHEAD > (int)kcp->mtu) {
            ikcp_output(kcp, buffer, size);
            ptr = buffer;
        }
        ptr = ikcp_encode_seg(ptr, &seg);
    }

    kcp->probe = 0;

    // calculate window size
    cwnd = _imin_(kcp->snd_wnd, kcp->rmt_wnd);
    if (kcp->nocwnd == 0) cwnd = _imin_(kcp->cwnd, cwnd);

    // move data from snd_queue to snd_buf
    while (_itimediff(kcp->snd_nxt, kcp->snd_una + cwnd) < 0) {
        IKCPSEG *newseg;
        if (iqueue_is_empty(&kcp->snd_queue)) break;

        newseg = iqueue_entry(kcp->snd_queue.next, IKCPSEG, node);

        iqueue_del(&newseg->node);
        iqueue_add_tail(&newseg->node, &kcp->snd_buf);
        kcp->nsnd_que--;
        kcp->nsnd_buf++;

        newseg->conv = kcp->conv;
        newseg->cmd = IKCP_CMD_PUSH;
        newseg->wnd = seg.wnd;
        newseg->ts = current;
        newseg->sn = kcp->snd_nxt++;
        newseg->una = kcp->rcv_nxt;
        newseg->resendts = current;
        newseg->rto = kcp->rx_rto;
        newseg->fastack = 0;
        newseg->xmit = 0;
    }

    // calculate resent
    resent = (kcp->fastresend > 0)? (IUINT32)kcp->fastresend : 0xffffffff;
    rtomin = (kcp->nodelay == 0)? (kcp->rx_rto >> 3) : 0;

    // flush data segments
    for (p = kcp->snd_buf.next; p != &kcp->snd_buf; p = p->next) {
        IKCPSEG *segment = iqueue_entry(p, IKCPSEG, node);
        int needsend = 0;
        if (segment->xmit == 0) {
            needsend = 1;
            segment->xmit++;
            segment->rto = kcp->rx_rto;
            segment->resendts = current + segment->rto + rtomin;
        }
        else if (_itimediff(current, segment->resendts) >= 0) {
            needsend = 1;
            segment->xmit++;
            kcp->xmit++;
            if (kcp->nodelay == 0) {
                segment->rto += _imax_(segment->rto, (IUINT32)kcp->rx_rto);
            } else {
                IINT32 step = (kcp->nodelay < 2)?
                    ((IINT32)(segment->rto)) : kcp->rx_rto;
                segment->rto += step / 2;
            }
            segment->resendts = current + segment->rto;
            lost = 1;
        }
        else if (segment->fastack >= resent) {
            if ((int)segment->xmit <= kcp->fastlimit ||
                kcp->fastlimit <= 0) {
                needsend = 1;
                segment->xmit++;
                segment->fastack = 0;
                segment->resendts = current + segment->rto;
                change++;
            }
        }

        if (needsend) {
            int need;
            segment->ts = current;
            segment->wnd = seg.wnd;
            segment->una = kcp->rcv_nxt;

            size = (int)(ptr - buffer);
            need = IKCP_OVERHEAD + segment->len;

            if (size + need > (int)kcp->mtu) {
                ikcp_output(kcp, buffer, size);
                ptr = buffer;
            }

            ptr = ikcp_encode_seg(ptr, segment);

            if (segment->len > 0) {
                memcpy(ptr, segment->data, segment->len);
                ptr += segment->len;
            }

            if (segment->xmit >= kcp->dead_link) {
                kcp->state = (IUINT32)-1;
            }
        }
    }

    // flush remaining segments in the buffer
    size = (int)(ptr - buffer);
    if (size > 0) {
        ikcp_output(kcp, buffer, size);
    }

    // update ssthresh
    if (change) {
        IUINT32 inflight = kcp->snd_nxt - kcp->snd_una;
        kcp->ssthresh = inflight / 2;
        if (kcp->ssthresh < IKCP_THRESH_MIN)
            kcp->ssthresh = IKCP_THRESH_MIN;
        kcp->cwnd = kcp->ssthresh + resent;
        kcp->incr = kcp->cwnd * kcp->mss;
    }

    if (lost) {
        kcp->ssthresh = cwnd / 2;
        if (kcp->ssthresh < IKCP_THRESH_MIN)
            kcp->ssthresh = IKCP_THRESH_MIN;
        kcp->cwnd = 1;
        kcp->incr = kcp->mss;
    }

    if (kcp->cwnd < 1) {
        kcp->cwnd = 1;
        kcp->incr = kcp->mss;
    }
}

//---------------------------------------------------------------------
// update state (call it repeatedly, every 10ms-100ms), or you can ask
// ikcp_check when to call it again (assuming no intervening ikcp_input
// or ikcp_send calls).
// 'current' - current timestamp in milliseconds.
//---------------------------------------------------------------------
void ikcp_update(ikcpcb *kcp, IUINT32 current)
{
    IINT32 slap;

    kcp->current = current;

    if (kcp->updated == 0) {
        kcp->updated = 1;
        kcp->ts_flush = kcp->current;
    }

    slap = _itimediff(kcp->current, kcp->ts_flush);

    if (slap >= 10000 || slap < -10000) {
        kcp->ts_flush = kcp->current;
        slap = 0;
    }

    if (slap >= 0) {
        kcp->ts_flush += kcp->interval;
        if (_itimediff(kcp->current, kcp->ts_flush) >= 0) {
            kcp->ts_flush = kcp->current + kcp->interval;
        }
        ikcp_flush(kcp);
    }
}

//---------------------------------------------------------------------
// Determine when you should invoke ikcp_update:
// returns the time, in milliseconds, at which ikcp_update should next be
// invoked, assuming no intervening ikcp_input/ikcp_send calls. You can
// call ikcp_update at that time instead of calling it repeatedly.
// Important for reducing unnecessary ikcp_update invocations. Use it to
// schedule ikcp_update (e.g. when implementing an epoll-like mechanism,
// or to optimize ikcp_update when handling massive numbers of kcp
// connections).
//---------------------------------------------------------------------
IUINT32 ikcp_check(const ikcpcb *kcp, IUINT32 current)
{
    IUINT32 ts_flush = kcp->ts_flush;
    IINT32 tm_flush = 0x7fffffff;
    IINT32 tm_packet = 0x7fffffff;
    IUINT32 minimal = 0;
    struct IQUEUEHEAD *p;

    if (kcp->updated == 0) {
        return current;
    }

    if (_itimediff(current, ts_flush) >= 10000 ||
        _itimediff(current, ts_flush) < -10000) {
        ts_flush = current;
    }

    if (_itimediff(current, ts_flush) >= 0) {
        return current;
    }

    tm_flush = _itimediff(ts_flush, current);

    for (p = kcp->snd_buf.next; p != &kcp->snd_buf; p = p->next) {
        const IKCPSEG *seg = iqueue_entry(p, const IKCPSEG, node);
        IINT32 diff = _itimediff(seg->resendts, current);
        if (diff <= 0) {
            return current;
        }
        if (diff < tm_packet) tm_packet = diff;
    }

    minimal = (IUINT32)(tm_packet < tm_flush ? tm_packet : tm_flush);
    if (minimal >= kcp->interval) minimal = kcp->interval;

    return current + minimal;
}
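
/*
 * Illustration only: ikcp_check() lets an event loop sleep until the next
 * flush or retransmission deadline instead of polling every interval.
 * A sketch (clock_ms() and wait_readable_until() are assumptions, not part
 * of KCP), guarded out of compilation:
 */
#if 0
static void schedule_sketch(ikcpcb *kcp)
{
    while (1) {
        IUINT32 now = clock_ms();
        IUINT32 next = ikcp_check(kcp, now);    /* earliest time an update is useful */
        wait_readable_until(next);              /* poll the socket or sleep until 'next' */
        ikcp_update(kcp, clock_ms());
    }
}
#endif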

int ikcp_setmtu(ikcpcb *kcp, int mtu)
{
    char *buffer;
    if (mtu < 50 || mtu < (int)IKCP_OVERHEAD)
        return -1;
    buffer = (char*)ikcp_malloc((mtu + IKCP_OVERHEAD) * 3);
    if (buffer == NULL)
        return -2;
    kcp->mtu = mtu;
    kcp->mss = kcp->mtu - IKCP_OVERHEAD;
    ikcp_free(kcp->buffer);
    kcp->buffer = buffer;
    return 0;
}

int ikcp_interval(ikcpcb *kcp, int interval)
{
    if (interval > 5000) interval = 5000;
    else if (interval < 10) interval = 10;
    kcp->interval = interval;
    return 0;
}

int ikcp_nodelay(ikcpcb *kcp, int nodelay, int interval, int resend, int nc)
{
    if (nodelay >= 0) {
        kcp->nodelay = nodelay;
        if (nodelay) {
            kcp->rx_minrto = IKCP_RTO_NDL;
        }
        else {
            kcp->rx_minrto = IKCP_RTO_MIN;
        }
    }
    if (interval >= 0) {
        if (interval > 5000) interval = 5000;
        else if (interval < 10) interval = 10;
        kcp->interval = interval;
    }
    if (resend >= 0) {
        kcp->fastresend = resend;
    }
    if (nc >= 0) {
        kcp->nocwnd = nc;
    }
    return 0;
}
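
/*
 * Illustration only: two commonly used configurations (a negative argument
 * leaves the corresponding parameter unchanged):
 */
#if 0
    /* default, TCP-like behaviour: no nodelay, 100ms interval,
       fast resend off, congestion control enabled */
    ikcp_nodelay(kcp, 0, 100, 0, 0);

    /* low-latency mode: nodelay on (min RTO drops to IKCP_RTO_NDL = 30ms),
       10ms interval, retransmit after being skipped by 2 newer ACKs,
       congestion control disabled */
    ikcp_nodelay(kcp, 1, 10, 2, 1);
#endif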

int ikcp_wndsize(ikcpcb *kcp, int sndwnd, int rcvwnd)
{
    if (kcp) {
        if (sndwnd > 0) {
            kcp->snd_wnd = sndwnd;
        }
        if (rcvwnd > 0) {   // must >= max fragment size
            kcp->rcv_wnd = _imax_(rcvwnd, IKCP_WND_RCV);
        }
    }
    return 0;
}

int ikcp_waitsnd(const ikcpcb *kcp)
{
    return kcp->nsnd_buf + kcp->nsnd_que;
}

// read conv
IUINT32 ikcp_getconv(const void *ptr)
{
    IUINT32 conv;
    ikcp_decode32u((const char*)ptr, &conv);
    return conv;
}
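
/*
 * Illustration only: ikcp_getconv() reads the conversation id from the
 * first 4 bytes of a raw datagram, which lets one UDP socket demultiplex
 * packets onto multiple kcp sessions before calling ikcp_input. A sketch
 * (find_session_by_conv is an assumption, not part of KCP), guarded out
 * of compilation:
 */
#if 0
static void dispatch_sketch(const char *packet, long size)
{
    if (size >= (long)IKCP_OVERHEAD) {
        IUINT32 conv = ikcp_getconv(packet);
        ikcpcb *kcp = find_session_by_conv(conv);   /* assumed session lookup */
        if (kcp) ikcp_input(kcp, packet, size);
    }
}
#endif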