OpenDNSSEC-enforcer  2.0.4
schedule.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2009 NLNet Labs. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
17  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
19  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
21  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
22  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
23  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  *
25  */
26 
38 #include "config.h"
39 
40 #include <ldns/ldns.h>
41 #include <pthread.h>
42 #include <signal.h>
43 
44 #include "scheduler/schedule.h"
45 #include "scheduler/task.h"
46 #include "duration.h"
47 #include "log.h"
48 
49 static const char* schedule_str = "scheduler";
50 
/* Condition must be accessible from the SIGALRM signal handler */
52 static pthread_cond_t *schedule_cond;
53 
54 static task_type* get_first_task(schedule_type *schedule);
55 
63 static void*
64 alarm_handler(sig_atomic_t sig)
65 {
66  switch (sig) {
67  case SIGALRM:
68  ods_log_debug("[%s] SIGALRM received", schedule_str);
69  /* normally a signal is locked to prevent race conditions.
70  * We MUST NOT lock this. This function is called by the
71  * main thread as interrupt which might have acquired
72  * the lock. */
73  pthread_cond_signal(schedule_cond);
74  break;
75  default:
76  ods_log_debug("[%s] Spurious signal %d received",
77  schedule_str, (int)sig);
78  }
79  return NULL;
80 }
81 
87 static void
88 set_alarm(schedule_type* schedule)
89 {
90  time_t now = time_now();
91  task_type *task = get_first_task(schedule);
92  if (!task || task->when == -1) {
93  ods_log_debug("[%s] no alarm set", schedule_str);
94  } else if (task->when == 0 || task->when <= now) {
95  ods_log_debug("[%s] signal now", schedule_str);
96  pthread_cond_signal(&schedule->schedule_cond);
97  } else {
98  ods_log_debug("[%s] SIGALRM set", schedule_str);
99  alarm(task->when - now);
100  }
101 }
102 
107 static ldns_rbnode_t*
108 task2node(task_type* task)
109 {
110  ldns_rbnode_t* node = (ldns_rbnode_t*) malloc(sizeof(ldns_rbnode_t));
111  if (node) {
112  node->key = task;
113  node->data = task;
114  }
115  return node;
116 }
117 
125 static task_type*
126 get_first_task(schedule_type* schedule)
127 {
128  ldns_rbnode_t* first_node;
129 
130  if (!schedule || !schedule->tasks) return NULL;
131  first_node = ldns_rbtree_first(schedule->tasks);
132  if (!first_node) return NULL;
133  return (task_type*) first_node->data;
134 }
135 
/**
 * Remove and return the earliest task from the schedule.
 *
 * The task lives in two trees (tasks, keyed by time, and tasks_by_name);
 * both nodes are deleted and freed here, but the shared task payload is
 * handed to the caller. Re-arms the alarm for the next task on success.
 *
 * NOTE(review): the deletion/free ordering below is deliberate — do not
 * reorder. Caller is presumably expected to hold schedule_lock (all
 * visible callers lock first) — confirm against header contract.
 *
 * \param schedule schedule to pop from; may be NULL.
 * \return popped task (caller takes ownership), or NULL when empty or
 *         when either tree deletion unexpectedly fails.
 */
static task_type*
pop_first_task(schedule_type* schedule)
{
    ldns_rbnode_t *node, *delnode;
    task_type *task;

    if (!schedule || !schedule->tasks) return NULL;
    node = ldns_rbtree_first(schedule->tasks);
    if (!node) return NULL;
    delnode = ldns_rbtree_delete(schedule->tasks, node->data);
    /* delnode == node, but we don't free it just yet, data is shared
     * with tasks_by_name tree */
    if (!delnode) return NULL;
    delnode = ldns_rbtree_delete(schedule->tasks_by_name, node->data);
    /* node is no longer referenced by either tree; safe to free now. */
    free(node);
    if (!delnode) return NULL;
    task = (task_type*) delnode->data;
    free(delnode); /* this delnode != node */
    /* schedule head changed: recompute the next wakeup. */
    set_alarm(schedule);
    return task;
}
164 
169 static void
170 task_delfunc(ldns_rbnode_t* elem, int del_payload)
171 {
172  task_type* task;
173 
174  if (elem && elem != LDNS_RBTREE_NULL) {
175  task = (task_type*) elem->data;
176  task_delfunc(elem->left, del_payload);
177  task_delfunc(elem->right, del_payload);
178  if (del_payload)
179  task_cleanup(task);
180  free((void*)elem);
181  }
182 }
183 
/**
 * schedule_create() — allocate and initialize a new scheduler.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as schedule_type* schedule_create().)
 *
 * Creates the two task trees (time-ordered and name-ordered), the lock
 * and condition variable, publishes the condition for the SIGALRM
 * handler, and installs that handler.
 *
 * NOTE(review): the ldns_rbtree_create() results are not checked for
 * NULL, and sigaction()'s return value is ignored — confirm whether
 * callers tolerate a partially initialized schedule on OOM.
 *
 * \return new schedule, or NULL when the initial malloc fails.
 */
{
    schedule_type* schedule;
    struct sigaction action;

    schedule = (schedule_type*) malloc(sizeof(schedule_type));
    if (!schedule) {
        ods_log_error("[%s] unable to create: malloc failed", schedule_str);
        return NULL;
    }

    schedule->tasks = ldns_rbtree_create(task_compare);
    schedule->tasks_by_name = ldns_rbtree_create(task_compare_name);
    pthread_mutex_init(&schedule->schedule_lock, NULL);
    pthread_cond_init(&schedule->schedule_cond, NULL);
    /* static condition for alarm. Must be accessible from interrupt */
    schedule_cond = &schedule->schedule_cond;

    /* Install the SIGALRM handler used by set_alarm(). The cast adapts
     * alarm_handler's declared type to sa_handler's void (*)(int). */
    action.sa_handler = (void (*)(int))&alarm_handler;
    sigfillset(&action.sa_mask);
    action.sa_flags = 0;
    sigaction(SIGALRM, &action, NULL);

    return schedule;
}
214 
/**
 * schedule_cleanup() — destroy a schedule and every task it holds.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * void schedule_cleanup(schedule_type *schedule).)
 *
 * Cancels any pending alarm, frees both trees (task payloads included),
 * destroys the lock and condition variable, then frees the schedule.
 *
 * NOTE(review): inside the `if (schedule->tasks)` branch,
 * tasks_by_name is dereferenced without its own NULL check, and both
 * task_delfunc calls are made with del_payload=1 even though the
 * payloads are shared between the trees — looks like a potential
 * double-cleanup; confirm task_cleanup() is idempotent / ownership
 * rules before changing.
 */
{
    if (!schedule) return;
    ods_log_debug("[%s] cleanup schedule", schedule_str);

    /* Disable any pending alarm before we destroy the pthread stuff
     * to prevent segfaults */
    alarm(0);

    if (schedule->tasks) {
        task_delfunc(schedule->tasks->root, 1);
        task_delfunc(schedule->tasks_by_name->root, 1);
        ldns_rbtree_free(schedule->tasks);
        ldns_rbtree_free(schedule->tasks_by_name);
        schedule->tasks = NULL;
    }
    pthread_mutex_destroy(&schedule->schedule_lock);
    pthread_cond_destroy(&schedule->schedule_cond);
    free(schedule);
}
240 
/**
 * schedule_time_first() — when is the earliest task due?
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * time_t schedule_time_first(schedule_type *schedule).)
 *
 * Takes the schedule lock for the duration of the peek.
 *
 * \return -1 when there is no schedule or no task, 0 when the first
 *         task is flagged for flush (i.e. due immediately), otherwise
 *         the task's scheduled time.
 */
time_t
{
    task_type* task;
    time_t when;

    if (!schedule || !schedule->tasks) return -1;

    pthread_mutex_lock(&schedule->schedule_lock);
    task = get_first_task(schedule);
    if (!task)
        when = -1;
    else if (task->flush)
        when = 0;
    else
        when = task->when;
    pthread_mutex_unlock(&schedule->schedule_lock);
    return when;
}
264 
/**
 * schedule_taskcount() — number of tasks currently scheduled.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * size_t schedule_taskcount(schedule_type *schedule).)
 *
 * NOTE(review): reads tasks->count without taking schedule_lock, unlike
 * the other accessors — presumably an accepted racy read; confirm.
 *
 * \return task count, or 0 when schedule/tree is absent.
 */
size_t
{
    if (!schedule || !schedule->tasks) return 0;
    return schedule->tasks->count;
}
271 
/**
 * schedule_flush() — mark every scheduled task for immediate execution
 * and wake the scheduler.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * void schedule_flush(schedule_type *schedule).)
 *
 * Walks the time-ordered tree under the schedule lock, sets each task's
 * flush flag, then signals the condition variable.
 */
void
{
    ldns_rbnode_t* node;
    task_type* task;

    ods_log_debug("[%s] flush all tasks", schedule_str);
    if (!schedule || !schedule->tasks) return;

    pthread_mutex_lock(&schedule->schedule_lock);
    node = ldns_rbtree_first(schedule->tasks);
    while (node && node != LDNS_RBTREE_NULL) {
        task = (task_type*) node->data;
        /*
         * TODO BUG? schedule_flush_type() sets when to zero, this does not.
         * Who's right and who's wrong?
         */
        task->flush = 1;
        node = ldns_rbtree_next(node);
    }
    /* wakeup! work to do! */
    pthread_cond_signal(&schedule->schedule_cond);
    pthread_mutex_unlock(&schedule->schedule_lock);
}
296 
/**
 * schedule_flush_type() — flush all tasks whose id matches, moving them
 * to the front of the queue.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * int schedule_flush_type(schedule_type *schedule, task_id id).)
 *
 * For each matching task: remove its node from the time-ordered tree,
 * set the flush flag and reset `when` to now, and reinsert it so the
 * tree ordering stays consistent with the new time. Wakes the scheduler
 * afterwards.
 *
 * \return number of tasks flushed (0 on NULL schedule or failure).
 */
int
{
    ldns_rbnode_t *node, *nextnode;
    int nflushed = 0;

    ods_log_debug("[%s] flush task", schedule_str);
    if (!schedule || !schedule->tasks) return 0;

    pthread_mutex_lock(&schedule->schedule_lock);
    node = ldns_rbtree_first(schedule->tasks);
    while (node && node != LDNS_RBTREE_NULL) {
        /* grab the successor first: `node` may be deleted below */
        nextnode = ldns_rbtree_next(node);
        if (node->data && ((task_type*)node->data)->what == id) {
            /* Merely setting flush is not enough. We must set it
             * to the front of the queue as well. */
            node = ldns_rbtree_delete(schedule->tasks, node->data);
            if (!node) break; /* strange, bail out */
            if (node->data) { /* task */
                ((task_type*)node->data)->flush = 1;
                /* This is important for our tests only. If a task is
                 * set to flush it should not affect the current time.
                 * Otherwise timeleap will advance time. */
                ((task_type*)node->data)->when = time_now();
                if (!ldns_rbtree_insert(schedule->tasks, node)) {
                    ods_log_crit("[%s] Could not reschedule task "
                        "after flush. A task has been lost!",
                        schedule_str);
                    free(node);
                    /* Do not free node->data it is still in use
                     * by the other rbtree. */
                    break;
                }
                nflushed++;
            }
        }
        node = nextnode;
    }
    /* wakeup! work to do! */
    pthread_cond_signal(&schedule->schedule_cond);
    pthread_mutex_unlock(&schedule->schedule_lock);
    return nflushed;
}
340 
/**
 * schedule_purge() — drop every scheduled task without destroying the
 * schedule itself.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * void schedule_purge(schedule_type *schedule).)
 *
 * Empties the time-ordered tree first (nodes only — payloads are still
 * referenced by the name tree), then empties the name tree, cleaning up
 * each task payload exactly once.
 */
void
{
    ldns_rbnode_t* node;

    if (!schedule || !schedule->tasks) return;

    pthread_mutex_lock(&schedule->schedule_lock);
    /* don't attempt to free payload, still referenced by other tree*/
    while ((node = ldns_rbtree_first(schedule->tasks)) !=
        LDNS_RBTREE_NULL)
    {
        node = ldns_rbtree_delete(schedule->tasks, node->data);
        if (node == 0) break;
        free(node);
    }
    /* also clean up name tree */
    while ((node = ldns_rbtree_first(schedule->tasks_by_name)) !=
        LDNS_RBTREE_NULL)
    {
        node = ldns_rbtree_delete(schedule->tasks_by_name, node->data);
        if (node == 0) break;
        /* payload is destroyed here, on the second (last) tree pass */
        task_cleanup((task_type*) node->data);
        free(node);
    }
    pthread_mutex_unlock(&schedule->schedule_lock);
}
368 
/**
 * schedule_pop_task() — pop the first task if it is due, otherwise
 * block on the condition variable until signalled.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * task_type* schedule_pop_task(schedule_type *schedule).)
 *
 * A task is "due" when it is flagged for flush, or its time has
 * arrived (when != -1 and when <= now).
 *
 * \return the popped task (caller takes ownership), or NULL after
 *         being woken without popping — callers must treat NULL as
 *         "re-evaluate", not as an error.
 */
task_type*
{
    time_t now = time_now();
    task_type* task;

    pthread_mutex_lock(&schedule->schedule_lock);
    task = get_first_task(schedule);
    if (!task || (!task->flush && (task->when == -1 || task->when > now))) {
        /* nothing to do now, sleep and wait for signal */
        pthread_cond_wait(&schedule->schedule_cond,
            &schedule->schedule_lock);
        task = NULL;
    } else {
        task = pop_first_task(schedule);
    }
    pthread_mutex_unlock(&schedule->schedule_lock);
    return task;
}
388 
/**
 * schedule_pop_first_task() — unconditionally pop the first task,
 * never blocking.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * task_type* schedule_pop_first_task(schedule_type *schedule).)
 *
 * \return the popped task (caller takes ownership) or NULL when the
 *         schedule is empty.
 */
task_type*
{
    task_type* task;

    pthread_mutex_lock(&schedule->schedule_lock);
    task = pop_first_task(schedule);
    pthread_mutex_unlock(&schedule->schedule_lock);
    return task;
}
399 
/**
 * schedule_task() — insert a task into both trees, or merge it into an
 * already-scheduled task with the same name.
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * ods_status schedule_task(schedule_type *schedule, task_type *task).
 * A few original comment lines between the name-tree-failure branch's
 * free(node1) and the "still in lock" comment were also lost.)
 *
 * On a name collision the existing task (task2) absorbs this one: it
 * keeps the earlier of the two times, takes over context and
 * clean_context, and the incoming task is destroyed. In all paths,
 * ownership of `task` transfers to this function: on success it is
 * either owned by the trees or destroyed after merging.
 *
 * \return ODS_STATUS_OK on insert or merge, ODS_STATUS_ERR otherwise.
 */
ods_status
{
    ldns_rbnode_t *node1, *node2;
    ods_status status;
    task_type* task2;

    if (!task) {
        ods_log_error("[%s] unable to schedule task: no task", schedule_str);
        return ODS_STATUS_ERR;
    }
    task->flush = 0;
    if (!schedule || !schedule->tasks) {
        ods_log_error("[%s] unable to schedule task: no schedule",
            schedule_str);
        return ODS_STATUS_ERR;
    }

    ods_log_debug("[%s] schedule task [%s] for %s", schedule_str,
        task_what2str(task->what), task_who2str(task->who));

    pthread_mutex_lock(&schedule->schedule_lock);
    status = ODS_STATUS_ERR;
    if ((node1 = task2node(task))) {
        if (ldns_rbtree_insert(schedule->tasks_by_name, node1)) {
            if ((node2 = task2node(task))) {
                if(ldns_rbtree_insert(schedule->tasks, node2)) {
                    /* success inserting in two trees */
                    set_alarm(schedule);
                    status = ODS_STATUS_OK;
                } else { /* insert in tasks tree failed */
                    ods_log_error("[%s] unable to schedule task [%s] for %s: "
                        " already present", schedule_str, task_what2str(task->what),
                        task_who2str(task->who));
                    /* this will free node1 */
                    /* NOTE(review): node1 (an ldns_rbnode_t*) is passed
                     * as the *key* here, but task_compare_name presumably
                     * expects a task_type* — verify this delete actually
                     * finds the entry. */
                    free(ldns_rbtree_delete(schedule->tasks_by_name, node1));
                    free(node2);
                }
            } else { /* could not alloc node2 */
                /* this will free node1 */
                free(ldns_rbtree_delete(schedule->tasks_by_name, node1));
            }

        } else {/* insert in name tree failed */
            /* a task with this name already exists: merge into it */
            free(node1);
            /* still in lock guaranteed to succeed. */
            node1 = ldns_rbtree_search(schedule->tasks_by_name, task);
            /* This copy of 'task' is referenced by both trees */
            task2 = (task_type*)node1->key;
            node1 = ldns_rbtree_delete(schedule->tasks, task2);
            /* keep the earlier due time of the two */
            if (task->when < task2->when)
                task2->when = task->when;
            if (task2->context && task2->clean_context) {
                task2->clean_context(task2);
            }
            task2->context = task->context;
            task2->clean_context = task->clean_context;
            /* detach context before destroying the incoming task so the
             * transferred context is not cleaned up with it */
            task->context = NULL;
            task_cleanup(task);
            (void) ldns_rbtree_insert(schedule->tasks, node1);
            /* node1 now owned by tree */
            node1 = NULL;
            set_alarm(schedule);
            status = ODS_STATUS_OK;
        }
    } /* else {failure) */
    pthread_mutex_unlock(&schedule->schedule_lock);
    return status;
}
473 
/**
 * schedule_release_all() — wake every thread blocked in
 * schedule_pop_task().
 * (NOTE(review): the declarator line was lost in this extract; the
 * Doxygen index identifies it as
 * void schedule_release_all(schedule_type *schedule).)
 *
 * Broadcasts on the schedule condition under the lock; used e.g. for
 * shutdown so workers can re-check their run state.
 */
void
{
    pthread_mutex_lock(&schedule->schedule_lock);
    pthread_cond_broadcast(&schedule->schedule_cond);
    pthread_mutex_unlock(&schedule->schedule_lock);
}
schedule_type * schedule_create()
Definition: schedule.c:189
void ods_log_debug(const char *format,...)
Definition: log.c:41
time_t when
Definition: task.h:63
char * who
Definition: task.h:66
pthread_mutex_t schedule_lock
Definition: schedule.h:52
task_type * schedule_pop_first_task(schedule_type *schedule)
Definition: schedule.c:390
int flush
Definition: task.h:65
void schedule_purge(schedule_type *schedule)
Definition: schedule.c:342
void * context
Definition: task.h:68
pthread_cond_t schedule_cond
Definition: schedule.h:51
ods_status schedule_task(schedule_type *schedule, task_type *task)
Definition: schedule.c:401
void ods_log_error(const char *format,...)
Definition: log.c:69
enum task_id_enum task_id
Definition: task.h:53
void ods_log_crit(const char *format,...)
Definition: log.c:80
void schedule_release_all(schedule_type *schedule)
Definition: schedule.c:475
time_t schedule_time_first(schedule_type *schedule)
Definition: schedule.c:246
task_type * schedule_pop_task(schedule_type *schedule)
Definition: schedule.c:370
void task_cleanup(task_type *task)
Definition: task.c:147
const char * task_what2str(int what)
Definition: task.c:212
task_type *(* clean_context)(task_type *task)
Definition: task.h:70
ldns_rbtree_t * tasks_by_name
Definition: schedule.h:50
int schedule_flush_type(schedule_type *schedule, task_id id)
Definition: schedule.c:298
const char * task_who2str(const char *who)
Definition: task.c:240
task_id what
Definition: task.h:60
ldns_rbtree_t * tasks
Definition: schedule.h:49
void schedule_cleanup(schedule_type *schedule)
Definition: schedule.c:220
int task_compare(const void *a, const void *b)
Definition: task.c:170
int task_compare_name(const void *a, const void *b)
Definition: task.c:195
void schedule_flush(schedule_type *schedule)
Definition: schedule.c:273
size_t schedule_taskcount(schedule_type *schedule)
Definition: schedule.c:266