workqueue.c

/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Quagga; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <zebra.h>

#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

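/* Allocate a zeroed work queue item.  The queue argument is only
 * sanity-checked here; the item is not yet attached to any list.
 */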
static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}

/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ( (new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);
      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;

  return new;
}

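/* A minimal usage sketch, not part of the original file: how a daemon
 * might create and configure a queue.  The my_* names are hypothetical;
 * my_workfunc would be written against the wq_item_status contract
 * (see the sketch at the end of this file).
 */
#if 0
static void
my_queue_setup (struct thread_master *master, void *first_item)
{
  struct work_queue *wq = work_queue_new (master, "my work queue");

  wq->spec.workfunc = my_workfunc; /* required: processes one item */
  wq->spec.max_retries = 3;        /* per-item retry budget */

  work_queue_add (wq, first_item); /* enqueue; runs after spec.hold */
}
#endif /* 0 */
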
void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel (wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}

bool
work_queue_is_scheduled (struct work_queue *wq)
{
  return (wq->thread != NULL);
}

static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
       && (wq->thread == NULL)
       && (listcount (wq->items) > 0) )
    {
      wq->thread = thread_add_background (wq->master, work_queue_run,
                                          wq, delay);
      return 1;
    }
  else
    return 0;
}

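/* Add an item to the queue and, unless the queue is plugged, arrange
 * for a run once the hold timer expires.
 */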
void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}

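/* Remove an item from the queue, first running the caller's
 * del_item_data callback on its payload, if one is set.
 */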
static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}

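/* Move an item to the back of the queue; used to service WQ_REQUEUE. */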
static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}

DEFUN(show_work_queues,
      show_work_queues_cmd,
      "show work-queues",
      SHOW_STR
      "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %21s%s",
           ' ', "List", "(ms) ", "Q. Runs", "Cycle Counts ",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %7s %6s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total",
           "Best", "Gran.", "Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_out (vty, "%c %8d %5d %8ld %7d %6d %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs,
               wq->cycles.best, wq->cycles.granularity,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}

/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, schedule it again if appropriate,
 * i.e. allow the queue to be drained again
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}

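/* A minimal sketch, not from the original file, of the intended
 * plug/unplug pattern: batch several additions, then let the queue
 * drain in one go.  The my_* names are hypothetical.
 */
#if 0
static void
my_bulk_add (struct work_queue *wq, void **my_items, int my_nitems)
{
  int i;

  work_queue_plug (wq);            /* hold off scheduling */
  for (i = 0; i < my_nitems; i++)
    work_queue_add (wq, my_items[i]);
  work_queue_unplug (wq);          /* schedule a single drain */
}
#endif /* 0 */
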
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise scheduling is left to work_queue_add
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  assert (wq && wq->items);
  wq->thread = NULL;

  /* calculate cycle granularity:
   * list iteration == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
   *              if we run to end of time slot, can increase otherwise
   *              by a small factor.
   *
   * We could use just the average and save some work, however we want to be
   * able to adjust quickly to CPU pressure. Average won't shift much if
   * daemon has been running a long time.
   */
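  /* Worked example of the adjustment at the 'stats' label below, where
   * WQ_HYSTERESIS_FACTOR is 4: with granularity 10, a run of 200 cycles
   * without yielding exceeds 10 * 4 * 4 = 160, so granularity ramps up
   * to 40; if the next run then yields after only 15 cycles, granularity
   * is cut back to 15.
   */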
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item->data);
          work_queue_item_remove (wq, node);
          continue;
        }

      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));

      switch (ret)
        {
        case WQ_QUEUE_BLOCKED:
          {
            /* decrement item->ran again, because this isn't an item
             * specific error, and fall through to WQ_RETRY_LATER
             */
            item->ran--;
          }
        case WQ_RETRY_LATER:
          {
            goto stats;
          }
        case WQ_REQUEUE:
          {
            item->ran--;
            work_queue_item_requeue (wq, node);
            break;
          }
        case WQ_RETRY_NOW:
          /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
        case WQ_ERROR:
          {
            if (wq->spec.errorfunc)
              wq->spec.errorfunc (wq, item->data);
          }
          /* fall through here is deliberate */
        case WQ_SUCCESS:
        default:
          {
            work_queue_item_remove (wq, node);
            break;
          }
        }

      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if ( !(cycles % wq->cycles.granularity)
           && thread_should_yield (thread))
        {
          yielded = 1;
          goto stats;
        }
    }

stats:

#define WQ_HYSTERESIS_FACTOR 4

  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
    }
#undef WQ_HYSTERESIS_FACTOR

  wq->runs++;
  wq->cycles.total += cycles;

#if 0
  printf ("%s: cycles %d, new: best %d, worst %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}
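
/* A minimal end-to-end sketch, not part of the original file, showing
 * how the wq_item_status return values drive work_queue_run.  The my_*
 * names are hypothetical.
 */
#if 0
static wq_item_status
my_workfunc (struct work_queue *wq, void *data)
{
  struct my_job *job = data;

  if (my_resource_busy ())
    return WQ_QUEUE_BLOCKED; /* stop this run; item is not charged a retry */
  if (!my_try (job))
    return WQ_RETRY_LATER;   /* stop this run and retry on a later one */
  return WQ_SUCCESS;         /* item is removed (del_item_data is called) */
}
#endif /* 0 */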