workqueue.c

/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Quagga; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#include <lib/zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"
/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1
static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}
static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}
/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ( (new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);
      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;

  return new;
}
void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel (wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}
static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
       && (wq->thread == NULL)
       && (listcount (wq->items) > 0) )
    {
      wq->thread = thread_add_background (wq->master, work_queue_run,
                                          wq, delay);
      return 1;
    }
  else
    return 0;
}
void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}
static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}
static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}
DEFUN(show_work_queues,
      show_work_queues_cmd,
      "show work-queues",
      SHOW_STR
      "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %21s%s",
           ' ', "List","(ms) ","Q. Runs","Cycle Counts ",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %7s %6s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total",
           "Best","Gran.","Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_out (vty,"%c %8d %5d %8ld %7d %6d %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs,
               wq->cycles.best, wq->cycles.granularity,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}
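
/* Illustrative "show work-queues" output (values made up; the layout is
 * derived from the format strings above):
 *
 *        List (ms)   Q. Runs          Cycle Counts
 *  P    Items  Hold    Total    Best  Gran.   Avg. Name
 *           0    10       12       5      1      2 example queue
 */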
/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}
/* unplug queue, scheduling it again if appropriate,
 * i.e. allow the queue to be drained again
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}
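
/* Illustrative pattern (not from this file): plug/unplug can batch
 * updates, so the queue only starts draining once all items are queued.
 * have_more_data() and next_data() are hypothetical.
 *
 *   work_queue_plug (wq);
 *   while (have_more_data ())
 *     work_queue_add (wq, next_data ());
 *   work_queue_unplug (wq);
 */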
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise it is rescheduled from work_queue_add
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  assert (wq && wq->items);
  wq->thread = NULL;
  /* calculate cycle granularity:
   * list iteration == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
   *              if we run to the end of the time slot, can increase
   *              otherwise by a small factor.
   *
   * We could use just the average and save some work, however we want to
   * be able to adjust quickly to CPU pressure. The average won't shift
   * much if the daemon has been running a long time.
   */
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
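
  /* Illustrative example of the adaptation in the stats section below,
   * with made-up values (WQ_HYSTERESIS_FACTOR is 4): at granularity 10,
   * a run of 50 cycles (> 10 * 4) bumps granularity to 14; a run of 200
   * cycles (> 10 * 4 * 4) quadruples it to 40; yielding after only 6
   * cycles drops it to 6.
   */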
  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item->data);
          work_queue_item_remove (wq, node);
          continue;
        }

      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));

      switch (ret)
        {
          case WQ_QUEUE_BLOCKED:
            {
              /* decrement item->ran again, because this isn't an item
               * specific error, and fall through to WQ_RETRY_LATER
               */
              item->ran--;
            }
          case WQ_RETRY_LATER:
            {
              goto stats;
            }
          case WQ_REQUEUE:
            {
              item->ran--;
              work_queue_item_requeue (wq, node);
              break;
            }
          case WQ_RETRY_NOW:
            /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
          case WQ_ERROR:
            {
              if (wq->spec.errorfunc)
                wq->spec.errorfunc (wq, item->data);
            }
            /* fall through here is deliberate */
          case WQ_SUCCESS:
          default:
            {
              work_queue_item_remove (wq, node);
              break;
            }
        }

      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if ( !(cycles % wq->cycles.granularity)
           && thread_should_yield (thread))
        {
          yielded = 1;
          goto stats;
        }
    }
stats:

#define WQ_HYSTERESIS_FACTOR 4

  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
    }
#undef WQ_HYSTERESIS_FACTOR

  wq->runs++;
  wq->cycles.total += cycles;

#if 0
  printf ("%s: cycles %d, new: best %d, worst %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}