/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Quagga; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}
/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ( (new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);
      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
  new->cycles.worst = UINT_MAX;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;

  return new;
}
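
/* A minimal usage sketch (illustrative only; "master", "example_workfunc"
 * and "some_data" are hypothetical caller-side names):
 *
 *   struct work_queue *wq = work_queue_new (master, "example queue");
 *   wq->spec.workfunc = example_workfunc;  -- processes one queued item
 *   wq->spec.max_retries = 3;
 *   work_queue_add (wq, some_data);        -- enqueue and schedule a run
 */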
void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel (wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}

bool
work_queue_is_scheduled (struct work_queue *wq)
{
  return (wq->thread != NULL);
}
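
/* Schedule the work queue thread iff the queue is unplugged, no thread
 * is already pending for it, and there is at least one item queued.
 * Returns 1 if a thread was scheduled, 0 otherwise.
 */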
static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
       && (wq->thread == NULL)
       && (listcount (wq->items) > 0) )
    {
      wq->thread = thread_add_background (wq->master, work_queue_run,
                                          wq, delay);
      return 1;
    }
  else
    return 0;
}
void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}
static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}

static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}
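
/* CLI handler for 'show work-queues': prints, per queue, a 'P' marker if
 * the queue is plugged, the current item count, the hold time (ms), the
 * total number of runs, the best/worst/granularity/average cycle counts,
 * the worst observed latency, and the queue name.
 */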
DEFUN(show_work_queues,
      show_work_queues_cmd,
      "show work-queues",
      SHOW_STR
      "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %21s %6s %5s%s",
           ' ', "List","(ms) ","Q. Runs","Cycle Counts ",
           " ","Worst",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %7s %6s %6s %6s %5s %s%s",
           'P',
           "Items",
           "Hold",
           "Total",
           "Best","Worst","Gran.","Avg.", "Lat.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_out (vty,"%c %8u %5u %8lu %7u %6u %6u %6u %5lu %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs,
               wq->cycles.best,
               MIN(wq->cycles.best, wq->cycles.worst),
               wq->cycles.granularity,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->worst_usec,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}
/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, and schedule it again if appropriate,
 * i.e. allow the queue to be drained again
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if a thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}
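
/* Illustrative batching pattern (sketch; "data" and "n" are hypothetical
 * caller-side names):
 *
 *   work_queue_plug (wq);            -- hold the queue while adding items
 *   for (i = 0; i < n; i++)
 *     work_queue_add (wq, data[i]);  -- items accumulate, queue not drained
 *   work_queue_unplug (wq);          -- resume draining the queue
 */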
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise the next run is scheduled by work_queue_add
 */
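/* Per-item status handling in the run loop below:
 *   WQ_SUCCESS       - item is removed from the queue
 *   WQ_RETRY_NOW     - retried immediately, up to spec.max_retries
 *   WQ_RETRY_LATER   - stop this run; the queue is rescheduled
 *   WQ_REQUEUE       - item is moved to the end of the queue
 *   WQ_QUEUE_BLOCKED - as WQ_RETRY_LATER, but without penalising the item
 *   WQ_ERROR         - spec.errorfunc, if any, is called for the item
 */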
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  unsigned long took = 0;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  assert (wq && wq->items);

  wq->thread = NULL;

  /* calculate cycle granularity:
   * list iteration == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Worst: starts at UINT_MAX, can only decrease.
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, is decreased
   *              if we run to the end of our time slot, and can otherwise
   *              increase by a small factor.
   *
   * We could use just the average and save some work, however we want to
   * be able to adjust quickly to CPU pressure. The average won't shift
   * much if the daemon has been running a long time.
   */
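  /* Worked example of the ramp-up, assuming WQ_HYSTERESIS_FACTOR of 4
   * (defined at the stats label below): with granularity 1, a run that
   * completes 100 cycles without yielding exceeds granularity * 4 * 4,
   * so granularity ramps quickly, 1 -> 4 -> 16 across runs; from 16,
   * 100 cycles only exceeds granularity * 4, so it creeps up by 4 per
   * run, capped by the worst yielded cycle count.
   */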
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item->data);
          work_queue_item_remove (wq, node);
          continue;
        }

      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));

      switch (ret)
        {
        case WQ_QUEUE_BLOCKED:
          {
            /* decrement item->ran again, because this isn't an item
             * specific error, and fall through to WQ_RETRY_LATER
             */
            item->ran--;
          }
        case WQ_RETRY_LATER:
          {
            goto stats;
          }
        case WQ_REQUEUE:
          {
            item->ran--;
            work_queue_item_requeue (wq, node);
            break;
          }
        case WQ_RETRY_NOW:
          /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
        case WQ_ERROR:
          {
            if (wq->spec.errorfunc)
              wq->spec.errorfunc (wq, item->data);
          }
          /* fall through here is deliberate */
        case WQ_SUCCESS:
        default:
          {
            work_queue_item_remove (wq, node);
            break;
          }
        }

      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if ( !(cycles % wq->cycles.granularity)
           && (took = thread_should_yield (thread)))
        {
          yielded = 1;
          goto stats;
        }
    }
stats:

#define WQ_HYSTERESIS_FACTOR 4

  if (cycles > wq->cycles.best)
    wq->cycles.best = cycles;

  if (took > wq->worst_usec)
    wq->worst_usec = took;

  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
      if (cycles < wq->cycles.worst)
        wq->cycles.worst = cycles;
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;

      /* clamp granularity down to the worst yielded cycle count */
      wq->cycles.granularity = MIN(wq->cycles.granularity, wq->cycles.worst);
    }
#undef WQ_HYSTERESIS_FACTOR

  wq->runs++;
  wq->cycles.total += cycles;

#if 0
  printf ("%s: cycles %d, new: best %d, granularity %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}