workqueue.c 9.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382
  1. /*
  2. * Quagga Work Queue Support.
  3. *
  4. * Copyright (C) 2005 Sun Microsystems, Inc.
  5. *
  6. * This file is part of GNU Zebra.
  7. *
  8. * Quagga is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the
  10. * Free Software Foundation; either version 2, or (at your option) any
  11. * later version.
  12. *
  13. * Quagga is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with Quagga; see the file COPYING. If not, write to the Free
  20. * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  21. * 02111-1307, USA.
  22. */
  23. #include <lib/zebra.h>
  24. #include "thread.h"
  25. #include "memory.h"
  26. #include "workqueue.h"
  27. #include "linklist.h"
  28. #include "command.h"
  29. #include "log.h"
  30. /* master list of work_queues */
  31. static struct list work_queues;
  32. #define WORK_QUEUE_MIN_GRANULARITY 1
  33. static struct work_queue_item *
  34. work_queue_item_new (struct work_queue *wq)
  35. {
  36. struct work_queue_item *item;
  37. assert (wq);
  38. item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
  39. sizeof (struct work_queue_item));
  40. return item;
  41. }
  42. static void
  43. work_queue_item_free (struct work_queue_item *item)
  44. {
  45. XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  46. return;
  47. }
  48. /* create new work queue */
  49. struct work_queue *
  50. work_queue_new (struct thread_master *m, const char *queue_name)
  51. {
  52. struct work_queue *new;
  53. new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));
  54. if (new == NULL)
  55. return new;
  56. new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  57. new->master = m;
  58. SET_FLAG (new->flags, WQ_UNPLUGGED);
  59. if ( (new->items = list_new ()) == NULL)
  60. {
  61. XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
  62. XFREE (MTYPE_WORK_QUEUE, new);
  63. return NULL;
  64. }
  65. new->items->del = (void (*)(void *)) work_queue_item_free;
  66. listnode_add (&work_queues, new);
  67. new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
  68. /* Default values, can be overriden by caller */
  69. new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
  70. return new;
  71. }
  72. void
  73. work_queue_free (struct work_queue *wq)
  74. {
  75. if (wq->thread != NULL)
  76. thread_cancel(wq->thread);
  77. /* list_delete frees items via callback */
  78. list_delete (wq->items);
  79. listnode_delete (&work_queues, wq);
  80. XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  81. XFREE (MTYPE_WORK_QUEUE, wq);
  82. return;
  83. }
  84. static inline int
  85. work_queue_schedule (struct work_queue *wq, unsigned int delay)
  86. {
  87. /* if appropriate, schedule work queue thread */
  88. if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
  89. && (wq->thread == NULL)
  90. && (listcount (wq->items) > 0) )
  91. {
  92. wq->thread = thread_add_background (wq->master, work_queue_run,
  93. wq, delay);
  94. return 1;
  95. }
  96. else
  97. return 0;
  98. }
  99. void
  100. work_queue_add (struct work_queue *wq, void *data)
  101. {
  102. struct work_queue_item *item;
  103. assert (wq);
  104. if (!(item = work_queue_item_new (wq)))
  105. {
  106. zlog_err ("%s: unable to get new queue item", __func__);
  107. return;
  108. }
  109. item->data = data;
  110. listnode_add (wq->items, item);
  111. work_queue_schedule (wq, wq->spec.hold);
  112. return;
  113. }
  114. static void
  115. work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
  116. {
  117. struct work_queue_item *item = listgetdata (ln);
  118. assert (item && item->data);
  119. /* call private data deletion callback if needed */
  120. if (wq->spec.del_item_data)
  121. wq->spec.del_item_data (wq, item->data);
  122. list_delete_node (wq->items, ln);
  123. work_queue_item_free (item);
  124. return;
  125. }
/* Move the item held by list node LN to the back of WQ's queue, so it
 * is revisited after the other pending items (used for WQ_REQUEUE).
 * The node itself is reused, so no allocation occurs.
 */
static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}
/* CLI command "show work-queues": print a two-row header, then one
 * summary line per registered work queue: plugged state ('P' when
 * plugged), item count, hold time, total runs, and cycle statistics
 * (best, granularity, and average cycles per run).
 */
DEFUN(show_work_queues,
      show_work_queues_cmd,
      "show work-queues",
      SHOW_STR
      "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  /* header, first row: column-group captions */
  vty_out (vty,
           "%c %8s %5s %8s %21s%s",
           ' ', "List","(ms) ","Q. Runs","Cycle Counts ",
           VTY_NEWLINE);
  /* header, second row: per-column captions ('P' marks the plug flag column) */
  vty_out (vty,
           "%c %8s %5s %8s %7s %6s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total",
           "Best","Gran.","Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO ((&work_queues), node, wq))
    {
      vty_out (vty,"%c %8d %5d %8ld %7d %6d %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs,
               wq->cycles.best, wq->cycles.granularity,
               /* average cycles per run; guard the division on runs == 0 */
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }
  return CMD_SUCCESS;
}
  168. /* 'plug' a queue: Stop it from being scheduled,
  169. * ie: prevent the queue from draining.
  170. */
  171. void
  172. work_queue_plug (struct work_queue *wq)
  173. {
  174. if (wq->thread)
  175. thread_cancel (wq->thread);
  176. wq->thread = NULL;
  177. UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
  178. }
  179. /* unplug queue, schedule it again, if appropriate
  180. * Ie: Allow the queue to be drained again
  181. */
  182. void
  183. work_queue_unplug (struct work_queue *wq)
  184. {
  185. SET_FLAG (wq->flags, WQ_UNPLUGGED);
  186. /* if thread isnt already waiting, add one */
  187. work_queue_schedule (wq, wq->spec.hold);
  188. }
  189. /* timer thread to process a work queue
  190. * will reschedule itself if required,
  191. * otherwise work_queue_item_add
  192. */
  193. int
  194. work_queue_run (struct thread *thread)
  195. {
  196. struct work_queue *wq;
  197. struct work_queue_item *item;
  198. wq_item_status ret;
  199. unsigned int cycles = 0;
  200. struct listnode *node, *nnode;
  201. char yielded = 0;
  202. wq = THREAD_ARG (thread);
  203. wq->thread = NULL;
  204. assert (wq && wq->items);
  205. /* calculate cycle granularity:
  206. * list iteration == 1 cycle
  207. * granularity == # cycles between checks whether we should yield.
  208. *
  209. * granularity should be > 0, and can increase slowly after each run to
  210. * provide some hysteris, but not past cycles.best or 2*cycles.
  211. *
  212. * Best: starts low, can only increase
  213. *
  214. * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
  215. * if we run to end of time slot, can increase otherwise
  216. * by a small factor.
  217. *
  218. * We could use just the average and save some work, however we want to be
  219. * able to adjust quickly to CPU pressure. Average wont shift much if
  220. * daemon has been running a long time.
  221. */
  222. if (wq->cycles.granularity == 0)
  223. wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
  224. for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
  225. {
  226. assert (item && item->data);
  227. /* dont run items which are past their allowed retries */
  228. if (item->ran > wq->spec.max_retries)
  229. {
  230. /* run error handler, if any */
  231. if (wq->spec.errorfunc)
  232. wq->spec.errorfunc (wq, item->data);
  233. work_queue_item_remove (wq, node);
  234. continue;
  235. }
  236. /* run and take care of items that want to be retried immediately */
  237. do
  238. {
  239. ret = wq->spec.workfunc (wq, item->data);
  240. item->ran++;
  241. }
  242. while ((ret == WQ_RETRY_NOW)
  243. && (item->ran < wq->spec.max_retries));
  244. switch (ret)
  245. {
  246. case WQ_QUEUE_BLOCKED:
  247. {
  248. /* decrement item->ran again, cause this isn't an item
  249. * specific error, and fall through to WQ_RETRY_LATER
  250. */
  251. item->ran--;
  252. }
  253. case WQ_RETRY_LATER:
  254. {
  255. goto stats;
  256. }
  257. case WQ_REQUEUE:
  258. {
  259. item->ran--;
  260. work_queue_item_requeue (wq, node);
  261. break;
  262. }
  263. case WQ_RETRY_NOW:
  264. /* a RETRY_NOW that gets here has exceeded max_tries, same as ERROR */
  265. case WQ_ERROR:
  266. {
  267. if (wq->spec.errorfunc)
  268. wq->spec.errorfunc (wq, item);
  269. }
  270. /* fall through here is deliberate */
  271. case WQ_SUCCESS:
  272. default:
  273. {
  274. work_queue_item_remove (wq, node);
  275. break;
  276. }
  277. }
  278. /* completed cycle */
  279. cycles++;
  280. /* test if we should yield */
  281. if ( !(cycles % wq->cycles.granularity)
  282. && thread_should_yield (thread))
  283. {
  284. yielded = 1;
  285. goto stats;
  286. }
  287. }
  288. stats:
  289. #define WQ_HYSTERESIS_FACTOR 4
  290. /* we yielded, check whether granularity should be reduced */
  291. if (yielded && (cycles < wq->cycles.granularity))
  292. {
  293. wq->cycles.granularity = ((cycles > 0) ? cycles
  294. : WORK_QUEUE_MIN_GRANULARITY);
  295. }
  296. /* otherwise, should granularity increase? */
  297. else if (cycles >= (wq->cycles.granularity))
  298. {
  299. if (cycles > wq->cycles.best)
  300. wq->cycles.best = cycles;
  301. /* along with yielded check, provides hysteresis for granularity */
  302. if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
  303. * WQ_HYSTERESIS_FACTOR))
  304. wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
  305. else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
  306. wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
  307. }
  308. #undef WQ_HYSTERIS_FACTOR
  309. wq->runs++;
  310. wq->cycles.total += cycles;
  311. #if 0
  312. printf ("%s: cycles %d, new: best %d, worst %d\n",
  313. __func__, cycles, wq->cycles.best, wq->cycles.granularity);
  314. #endif
  315. /* Is the queue done yet? If it is, call the completion callback. */
  316. if (listcount (wq->items) > 0)
  317. work_queue_schedule (wq, 0);
  318. else if (wq->spec.completion_func)
  319. wq->spec.completion_func (wq);
  320. return 0;
  321. }