3 #include <sys/select.h>
7 #include <sys/signalfd.h>
8 #include <sys/resource.h>
/* Book-keeping for fork()ed children of this process. */
13 static int child_count;
14 static int parent_count;
/*
 * Job-control pipes (see handle_job_request()/do_fork_limited()):
 * children write a pid into job_request_fd[] to ask for permission to
 * run (or -pid when a job finishes); the master parent answers with a
 * single byte on job_get_permission_fd[].  Inferred from usage below —
 * confirm against the elided lines.
 */
15 static int job_request_fd[2];
16 static int job_get_permission_fd[2];
/* Concurrency limits: at most max_jobs running and max_jobs_pending queued. */
18 static unsigned int max_jobs;
19 static unsigned int job_count;
20 static unsigned int jobs_pending;
21 static unsigned int max_jobs_pending;
/*
 * Fields of struct work_struct / struct work_queue (struct headers are
 * elided in this view).  A work item carries a callback and an
 * intrusive singly-linked next pointer; a queue holds the list head.
 */
25 int (*work_fn)(void *);
27 struct work_struct *next;
31 struct work_struct *work;
/*
 * Priority-ordered work queues; lower index is served first by
 * worker_thread().  Designated initializers name each queue and its
 * diagnostic mutex wrapper.
 */
37 struct work_queue work_queues[WORK_PRIORITIES_NUM] = {
39 .name = "high priority",
41 .name = "high_prio_queue",
42 .lock = PTHREAD_MUTEX_INITIALIZER,
46 .name = "low priority",
48 .name = "low_prio_queue",
49 .lock = PTHREAD_MUTEX_INITIALIZER,
/* Mutex + condition variable pair idle workers sleep on until work arrives. */
54 struct mutex work_pending_mutex = {
55 .name = "work_pending",
56 .lock = PTHREAD_MUTEX_INITIALIZER,
58 pthread_cond_t work_pending_cond = PTHREAD_COND_INITIALIZER;
/*
 * Pop and execute one work item from @queue.
 * Returns early (elided path) when the queue is empty.
 */
60 static int run_work_on_queue(struct work_queue *queue)
62 struct work_struct *work;
64 mutex_lock(&queue->lock);
67 pr_info("No work to run on queue %s\n", queue->name);
68 mutex_unlock(&queue->lock);
/* Unlink the head item (assignment of work = queue->work is elided). */
74 queue->work = work->next;
/*
 * If queue is not empty, try waking up more workers. It is
 * possible that when work were queued, the first worker did
 * not wake up soon enough and
 * (rest of this comment elided in this view)
 */
82 if (queue->length > 0)
83 pthread_cond_signal(&work_pending_cond);
85 mutex_unlock(&queue->lock);
/*
 * NOTE(review): queue->length is read here after the lock was dropped,
 * so the printed count may be stale; harmless, diagnostics only.
 */
87 pr_info("Executing work %s from queue %s, %d still pending\n",
88 work->name, queue->name, queue->length);
/* Run the callback outside the queue lock. */
90 work->work_fn(work->arg);
91 pr_info("Work %s done\n", work->name);
/*
 * Worker thread main loop: drain all queues highest-priority first,
 * then sleep on work_pending_cond until signalled.  @arg is the worker
 * index smuggled through the void pointer (see pthread_create call).
 */
97 static void *worker_thread(void *arg)
103 snprintf(name, sizeof(name), "worker%ld", (long)arg);
104 pthread_setname_np(pthread_self(), name);
108 int prio, work_done = 0;
/*
 * Execute as many works from the queues as
 * there are, starting from highest priority
 */
115 for (prio = 0; prio < WORK_PRIORITIES_NUM; prio++) {
117 run_work_on_queue(&work_queues[prio]);
/*
 * pthread_cond_wait() requires work_pending_mutex.lock to be held here;
 * the acquisition happens in an elided line — confirm.
 */
126 pr_info("Worker going to sleep\n");
127 ret = pthread_cond_wait(&work_pending_cond,
128 &work_pending_mutex.lock);
/*
 * FIXME: pthread_cond_wait() returns an error number and does NOT set
 * errno, so "%m" prints an unrelated error here; use strerror(ret).
 */
130 pr_err("Error: %m\n");
/* Record ownership in the diagnostic wrapper after the wait reacquires. */
132 mutex_lock_acquired(&work_pending_mutex);
134 mutex_unlock(&work_pending_mutex);
/*
 * Append a new work item @name running @work_fn(@arg) to the queue of
 * the given @priority, then wake one sleeping worker.
 * Returns nonzero (elided path) on invalid priority or allocation failure.
 */
141 int queue_work(unsigned int priority, char *name,
142 int (work_fn)(void *arg), void *arg)
144 struct work_queue *queue;
145 struct work_struct *work, *last_work;
147 if (priority >= WORK_PRIORITIES_NUM) {
148 pr_err("Invalid priority: %d\n", priority);
/*
 * NOTE(review): calloc() arguments are transposed — conventional order
 * is calloc(1, sizeof(*work)).  Same allocation either way.  The NULL
 * check and the work->name assignment appear to be in elided lines.
 */
152 work = calloc(sizeof(*work), 1);
155 work->work_fn = work_fn;
158 queue = &work_queues[priority];
/* Insert new work at the end of the work queue (O(n) tail walk). */
161 mutex_lock(&queue->lock);
163 last_work = queue->work;
164 while (last_work && last_work->next)
165 last_work = last_work->next;
170 last_work->next = work;
172 pr_info("Inserted work %s in queue %s, with %d pending items\n",
173 work->name, queue->name, queue->length);
175 mutex_unlock(&queue->lock);
/* Wake one worker; signalling after unlock avoids a pointless contention. */
177 pthread_cond_signal(&work_pending_cond);
/* Accessors for the fork book-keeping counters (bodies elided in this view). */
182 int get_child_count(void)
187 int get_parent_count(void)
/*
 * Event-handler callback for the signalfd: read one signalfd_siginfo
 * and reap the child that raised SIGCHLD.
 */
192 static int handle_signals(struct event_handler *h)
194 struct signalfd_siginfo siginfo;
197 ret = read(h->fd, &siginfo, sizeof(siginfo));
/*
 * FIXME: ret is a signed int compared against sizeof() (size_t).  When
 * read() fails with -1, the usual arithmetic conversions turn -1 into a
 * huge unsigned value, so this error check is silently skipped.  Test
 * ret < 0 first, then (size_t)ret < sizeof(siginfo); also %zu, not %zd,
 * for a size_t.
 */
198 if (ret < sizeof(siginfo)) {
199 pr_err("Expected %zd from read, got %d: %m\n",
200 sizeof(siginfo), ret);
/* Only SIGCHLD is expected; the mask is set up in init_jobcontrol(). */
204 if (siginfo.ssi_signo != SIGCHLD) {
205 pr_err("Unexpected signal %d, ignoring\n", siginfo.ssi_signo);
/* Reap exactly the child that signalled. */
209 harvest_zombies(siginfo.ssi_pid);
/*
 * Grant one waiting child permission to run by writing a single byte to
 * the permission pipe.  The byte's value is set in an elided line —
 * presumably a positive "go" token; confirm against deny_job().
 */
214 static int grant_new_job(void)
220 pr_info("Granting new job. %d jobs currently and %d pending\n",
221 job_count, jobs_pending)
223 ret = write(job_get_permission_fd[1], &byte, 1);
225 pr_err("Failed to write 1 byte: %m\n");
/*
 * Deny a waiting child: write the "no" byte on the permission pipe.
 * The child terminates itself on receipt (see do_fork_limited()).
 */
232 static int deny_job(void)
237 pr_info("Denying new job. %d jobs currently and %d pending, "
238 "limit of pending jobs is %d\n",
239 job_count, jobs_pending, max_jobs_pending);
241 ret = write(job_get_permission_fd[1], &byte, 1);
243 pr_err("Failed to write 1 byte: %m\n");
/*
 * Event-handler callback for the job-request pipe.  Protocol: a child
 * writes its pid (positive) to ask for a run slot; a limited job writes
 * -pid on exit.  Grants, queues, or denies based on job_count /
 * jobs_pending versus the configured limits (branch bodies partly
 * elided in this view).
 */
250 static int handle_job_request(struct event_handler *h)
254 ret = read(job_request_fd[0], &pid, sizeof(pid));
256 pr_err("Failed to read: %m\n");
/* EOF: all writers closed — nothing to service. */
261 pr_info("Read zero bytes\n");
/* At capacity: queue the request if the pending limit allows, else deny. */
266 if (job_count >= max_jobs) {
267 if (jobs_pending < max_jobs_pending)
/* Below capacity: grant immediately. */
272 ret = grant_new_job();
275 } else if (pid < 0) {
276 if (job_count > max_jobs)
277 pr_err("BUG: Job %u jobs exceeded limit %u\n",
278 job_count, max_jobs);
/* A limited job exited; hand its slot to the next pending request. */
280 pr_info("Job %d finished\n", -pid);
284 ret = grant_new_job();
/* Static epoll event-handler descriptors; .fd is filled in at init time. */
292 struct event_handler signal_handler = {
293 .handle_event = handle_signals,
298 struct event_handler job_request_handler = {
299 .handle_event = handle_job_request,
301 .name = "job_request",
 * Initialize the jobcontrol.
 *
 * Create the pipes that are used to grant children execution
 * permissions. If max_jobs is zero, count the number of CPUs from
 * /proc/cpuinfo and use that.
311 int init_jobcontrol(int max_jobs_requested)
/* Request pipe is non-blocking so the parent's event loop never stalls. */
321 if (pipe2(job_request_fd, O_NONBLOCK | O_CLOEXEC)) {
322 pr_err("Failed to create pipe: %m\n");
/* Permission pipe stays blocking: children wait on it for their turn. */
326 if (pipe2(job_get_permission_fd, O_CLOEXEC)) {
327 pr_err("Failed to create pipe: %m\n");
331 epoll_fd = epoll_create(1);
332 if (epoll_fd == -1) {
333 pr_err("Failed to epoll_create(): %m\n");
337 job_request_handler.fd = job_request_fd[0];
338 register_event_handler(&job_request_handler);
340 sigemptyset(&sigmask);
341 sigaddset(&sigmask, SIGCHLD);
/*
 * FIXME: copy-paste bug — the fd just created is signal_handler.fd,
 * but the check below tests job_request_handler.fd, so a failed
 * signalfd() goes undetected.  (SIGCHLD should also be blocked with
 * sigprocmask() for signalfd to work; presumably done in elided lines.)
 */
343 signal_handler.fd = signalfd(-1, &sigmask, SFD_CLOEXEC);
344 if (job_request_handler.fd < 0) {
345 pr_err("Failed to create signal_fd: %m\n");
349 register_event_handler(&signal_handler);
351 if (max_jobs_requested > 0) {
352 max_jobs = max_jobs_requested;
/* FIXME: "ro" is not a valid fopen() mode; glibc ignores the stray
 * 'o' but this is non-portable — should be "r". */
357 file = fopen("/proc/cpuinfo", "ro");
359 pr_err("Failed to open /proc/cpuinfo: %m\n");
/*
 * The CPU count algorithm simply reads the first 8 bytes from
 * the /proc/cpuinfo and then expects that line to be there as
 * many times as there are CPUs.
 */
368 ret = fread(match, 1, sizeof(match), file);
/* NOTE(review): %zd should be %zu for a size_t argument. */
369 if (ret < sizeof(match)) {
370 pr_err("read %d bytes when expecting %zd %m\n",
/* Count lines starting with the same prefix (e.g. "processo"). */
375 while(fgets(buf, sizeof(buf), file)) {
376 if (!strncmp(buf, match, sizeof(match)))
385 pr_info("Set maximum number of parallel jobs to %d\n", max_jobs);
387 max_jobs_pending = max_jobs * 10 + 25;
388 pr_info("Set maximum number of pending jobs to %d\n", max_jobs_pending);
/* Create worker threads.
 * NOTE(review): calloc() and pthread_create() return values are not
 * checked; the loop index is smuggled to the worker via the void*. */
391 thread = calloc(sizeof(*thread), max_jobs);
392 for (i = 0; i < max_jobs; i++)
393 pthread_create(&thread[i], NULL, worker_thread, (void *)i);
/*
 * Wait up to @timeout seconds for one event on the epoll set and
 * dispatch it to its registered handler.  Zero/negative timeouts are
 * passed to epoll_wait() unchanged (immediate / infinite).
 */
398 int poll_job_requests(int timeout)
400 struct epoll_event event;
401 struct event_handler *job_handler;
/* Convert positive seconds to milliseconds */
405 timeout = timeout > 0 ? 1000 * timeout : timeout;
407 ret = epoll_wait(epoll_fd, &event, 1, timeout);
410 if (errno != EINTR) {
411 pr_err("epoll_wait: %m\n");
/*
 * If epoll_wait() was interrupted, better start
 * everything again from the beginning
 */
423 pr_info("Timed out\n");
/* event.data.ptr was set to the handler in register_event_handler(). */
427 job_handler = event.data.ptr;
429 if (!job_handler || !job_handler->handle_event) {
430 pr_err("Corrupted event handler for fd %d\n",
435 pr_debug("Running handler %s to handle events from fd %d\n",
436 job_handler->name, job_handler->fd);
437 job_handler->handle_event(job_handler);
440 pr_info("Jobs active: %u, pending: %u\n", job_count, jobs_pending);
 * Per process flag indicating whether this child has requested fork
 * limiting. If it has, it must also tell the master parent when it
 * has died so that the parent can give the next pending job permission to
 * run.  (Tail of the original comment is elided in this view.)
450 static int is_limited_fork;
/*
 * Body fragment of do_fork() — the function's signature and the fork()
 * call itself are elided in this view.  The child-side code below
 * closes the pipe ends that only the master parent may keep open.
 */
457 pr_err("fork() failed: %m\n");
463 pr_debug("Fork %d, child %d\n", child_count, child);
/*
 * Also do not notify the master parent the death of this
 * child. Only children that have been created with
 * do_fork_limited() can have this flag set.
 */
/*
 * Close unused ends of the job control pipes. Only the parent
 * which controls the jobs may have the write end open of the
 * job_get_permission_fd and the read end of the
 * job_request_fd. Failing to close the pipe ends properly
 * will cause the children to wait forever for the run
 * permission in case parent dies prematurely.
 *
 * Note! The file descriptors must be closed once and only
 * once. They are marked to -1 to make it impossible for
 * subsequent do_fork() calls from closing them again (in which
 * case some other file descriptor might already be reserved
 * for the same number) and prevent accidentally closing some
 * innocent file descriptors that are still in use.
 */
489 if (job_get_permission_fd[1] >= 0) {
490 close(job_get_permission_fd[1]);
491 job_get_permission_fd[1] = -1;
493 if (job_request_fd[0] >= 0) {
494 close(job_request_fd[0]);
495 job_request_fd[0] = -1;
/* reset child's child count */
/*
 * Send a job-control message to the master parent: the pid (set in an
 * elided line, presumably getpid()) for a run request, or -pid for a
 * completion notification.  Returns write()'s result; note the request
 * pipe is O_NONBLOCK, so a full pipe surfaces as a short/failed write.
 */
504 static int request_fork(int request)
508 pid = request > 0 ? pid : -pid;
510 return write(job_request_fd[1], &pid, sizeof(pid));
/* atexit() hook: tells the parent this limited job has finished (body elided). */
513 static void limited_fork_exit_handler(void)
/*
 * Like do_fork(), but allow the child continue only after the global
 * job count is low enough.
 *
 * We allow the parent to continue other more important activities but
 * child respects the limit of global active processes.
 */
526 int do_fork_limited(void)
/* Remember to notify the parent when we are done.
 * NOTE(review): registered on every call — presumably guarded by
 * is_limited_fork in elided lines; confirm it cannot stack up. */
536 atexit(limited_fork_exit_handler);
539 pr_debug("Requesting permission to go\n");
/* Signal the parent that we are here, waiting to go */
/*
 * The parent will tell us when we can continue. If there were
 * multiple children waiting for their turn to run only one
 * will be reading the content byte from the pipe and getting
 * the permission to run.
 */
550 ret = read(job_get_permission_fd[0], &byte, sizeof(byte));
/* EOF here means the parent closed its end (likely died). */
552 pr_err("Error requesting run, did the parent die?\n");
555 pr_err("Job control request failure: %m\n");
558 pr_info("Did not get permission to execute. Terminating\n");
/*
 * Avoid running exit handler, that would tell the
 * parent we died normally and decrement the job
 * count (continuation elided in this view).
 */
568 pr_debug("Continuing\n");
/*
 * Reap a terminated child (@pid, or any child if wait4() semantics for
 * the elided pid value allow) and log its exit status and rusage.
 */
572 int harvest_zombies(int pid)
575 struct rusage rusage;
576 char *status_str = NULL;
/* Nothing to reap if we never forked. */
579 if (child_count == 0)
583 pr_debug("Waiting on pid %d, children left: %d\n", pid,
/* FIXME: the error message below says waitid(), but wait4() is called. */
587 pid = wait4(pid, &status, 0, &rusage);
589 pr_err("Error on waitid(): %m\n");
/* Wait until the child has become a zombie */
593 } while (!WIFEXITED(status) && !WIFSIGNALED(status));
/* The loop guarantees exactly one of these branches sets status_str/code. */
596 if (WIFEXITED(status)) {
597 status_str = "exited with status";
598 code = WEXITSTATUS(status);
599 } else if (WIFSIGNALED(status)) {
600 status_str = "killed by signal";
601 code = WTERMSIG(status);
603 pr_debug("pid %d: %s %d. Children left: %d\n", pid,
604 status_str, code, child_count);
605 pr_debug("pid %d: User time: %ld.%03lds, System %ld.%03lds\n", pid,
606 (long)rusage.ru_utime.tv_sec, rusage.ru_utime.tv_usec / 1000,
607 (long)rusage.ru_stime.tv_sec, rusage.ru_stime.tv_usec / 1000);
 * Runs a command cmd with params argv, connects stdin and stdout to
 * the caller via the returned pipe fds (NULL out-params skip that pipe).
 *
 * Returns the pid of the executed process
618 int run_piped(const char *cmd, char *const argv[],
619 int *stdinfd, int *stdoutfd, int *stderrfd)
621 int ifd[2], ofd[2], efd[2], pid;
623 pr_info("Running command %s\n", cmd);
/*
 * NOTE(review): if a later pipe() fails, earlier pipe fds appear to be
 * leaked (cleanup not visible in this elided view — confirm).
 */
625 if (stdinfd && pipe(ifd)) {
626 pr_err("pipe() failed: %m\n");
630 if (stdoutfd && pipe(ofd)) {
631 pr_err("pipe() failed: %m\n");
635 if (stderrfd && pipe(efd)) {
636 pr_err("pipe() failed: %m\n");
/* fork() happens in an elided line; nonzero pid means parent side. */
641 if (pid) { /* Parent side */
/* Child side: wire pipe ends onto the standard fds, then exec.
 * NOTE(review): dup2() return values are unchecked. */
662 dup2(ifd[0], STDIN_FILENO);
667 dup2(ofd[1], STDOUT_FILENO);
672 dup2(efd[1], STDERR_FILENO);
/* Now we have redirected standard streams to parent process */
677 pr_err("Failed to execv command %s: %m\n", cmd);
 * Runs a command cmd with params argv, connects stdin and stdout to
 * the caller via FILE streams (NULL out-params skip that stream).
 *
 * Returns the pid of the executed process
689 int run_piped_stream(const char *cmd, char *const argv[],
690 FILE **stdinf, FILE **stdoutf, FILE **stderrf)
692 int ifd, ofd, efd, pid;
708 pid = run_piped(cmd, argv, i, o, e);
/*
 * NOTE(review): the child's stdin pipe is opened with mode "r", but the
 * parent end of a stdin pipe is normally written to — "w" looks like
 * the intended mode here; confirm against run_piped()'s fd direction.
 */
711 *stdinf = fdopen(ifd, "r");
712 if (*stdinf == NULL) {
713 pr_err("Error opening file stream for fd %d: %m\n",
720 *stdoutf = fdopen(ofd, "r");
721 if (*stdoutf == NULL) {
722 pr_err("Error opening file stream for fd %d: %m\n",
729 *stderrf = fdopen(efd, "r");
730 if (*stderrf == NULL) {
731 pr_err("Error opening file stream for fd %d: %m\n",
 * Forks a child and executes a command to run on parallel,
 * forwarding the child's stdout/stderr to our log.
/*
 * FIXME: the max macro lacks outer parentheses — max(a,b) used inside a
 * larger expression (e.g. 1 + max(a,b)) mis-associates.  Should be
 * ((a) < (b) ? (b) : (a)); also double-evaluates its arguments.
 */
744 #define max(a,b) (a) < (b) ? (b) : (a)
745 #define BUF_SIZE (128*1024)
746 int run(const char *cmd, char *const argv[])
754 child = run_piped(cmd, argv, NULL, &ofd, &efd);
/*
 * FIXME: select()'s first argument must be the highest fd PLUS ONE;
 * passing maxfd means readiness of the highest descriptor is never
 * reported.  Should be select(maxfd + 1, ...).
 */
766 maxfd = max(ofd, efd);
767 error = select(maxfd, &rfds, NULL, NULL, NULL);
770 pr_err("Error with select: %m\n");
774 if (FD_ISSET(ofd, &rfds)) {
775 bytes = read(ofd, rbuf, BUF_SIZE);
779 if (FD_ISSET(efd, &rfds)) {
781 bytes = read(efd, rbuf, BUF_SIZE);
785 pr_err("select() returned unknown fd\n");
790 pr_err("read() failed: %m\n");
/*
 * Workaround: When a process had die and it has only
 * written to stderr, select() doesn't indicate that
 * there might be something to read in stderr fd. To
 * work around this issue, we try to read stderr just
 * in case in order to ensure everything gets read.
 */
802 bytes = read(efd, rbuf, BUF_SIZE);
812 pr_err("%s: stderr: %s\n",
815 pr_info("%s: stdout: %s\n",
/* Reap the child before returning. */
826 harvest_zombies(child);
/*
 * Add @handler's fd to the module epoll set; events are dispatched to
 * handler->handle_event() by poll_job_requests().
 */
831 int register_event_handler(struct event_handler *handler)
833 struct epoll_event ev;
/* NOTE(review): fd 0 is a valid descriptor; this check should be < 0. */
836 if (handler->fd <= 0) {
837 pr_err("Invalid file descriptor of %d\n", handler->fd);
841 if (!handler->handle_event) {
842 pr_err("Handler callback missing\n");
846 pr_info("Registering handler for %s, fd %d\n",
847 handler->name, handler->fd);
/*
 * NOTE(review): epoll_event.data is a union — the .fd store below is
 * immediately overwritten by the .ptr store (dead store, harmless).
 */
849 ev.data.fd = handler->fd;
850 ev.data.ptr = handler;
851 ev.events = handler->events;
852 ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, handler->fd, &ev);
854 pr_err("Failed to add epoll_fd: %m\n");
/* Record file:line of the current lock owner for contention diagnostics. */
861 void _mutex_lock_acquired(struct mutex *lock, char *file, int line)
/*
 * Diagnostic mutex lock: trylock fast path; on contention, log who is
 * holding the lock, then block.  lock->file/line are read here without
 * holding the lock — a benign race, diagnostics only.
 */
867 int _mutex_lock(struct mutex *lock, char *file, int line)
871 if (!pthread_mutex_trylock(&lock->lock))
874 pr_info("Lock contention on lock %s on %s:%d\n",
875 lock->name, lock->file, lock->line);
/* NOTE(review): pthread_mutex_lock() returns an error number and does
 * not set errno, so "%m" below is unreliable; also the message has a
 * typo: "Acquirin" -> "Acquiring" (string left unchanged in this
 * doc-only pass). */
877 ret = pthread_mutex_lock(&lock->lock);
879 pr_err("Acquirin lock %s failed: %m, acquired %s:%d\n",
880 lock->name, lock->file, lock->line);
883 _mutex_lock_acquired(lock, file, line);
887 int _mutex_unlock(struct mutex *lock)
891 pthread_mutex_unlock(&lock->lock);