mirror of
https://github.com/git/git.git
synced 2024-11-15 15:03:47 +01:00
c553c72eed
This allows running external commands in parallel with ordered output on stderr. If we run external commands in parallel we cannot pipe the output directly to our stdout/err as it would mix up. So each process's output will flow through a pipe, which we buffer. One subprocess can be directly piped to our stdout/err for a low latency feedback to the user. Example: Let's assume we have 5 submodules A,B,C,D,E and each fetch takes a different amount of time as the different submodules vary in size, then the output of fetches in sequential order might look like this: time --> output: |---A---| |-B-| |-------C-------| |-D-| |-E-| When we schedule these submodules into at most two parallel processes, a schedule and sample output over time may look like this: process 1: |---A---| |-D-| |-E-| process 2: |-B-| |-------C-------| output: |---A---|B|---C-------|DE So A will be perceived as if it ran normally in the single-child version. As B has finished by the time A is done, we can dump its whole progress buffer on stderr, such that it looks like it finished in no time. Once that is done, C is determined to be the visible child and its progress will be reported in real time. So this way of output is really good for human consumption, as it only changes the timing, not the actual output. For machine consumption the output needs to be prepared in the tasks, by either having a prefix per line or per block to indicate whose task's output is displayed, because the output order may not follow the original sequential ordering: |----A----| |--B--| |-C-| will be scheduled to be all parallel: process 1: |----A----| process 2: |--B--| process 3: |-C-| output: |----A----|CB This happens because C finished before B did, so it will be queued for output before B. To detect when a child has finished executing, we check interleaved with other actions (such as checking the liveliness of children or starting new processes) whether the stderr pipe still exists. 
Once a child closed its stderr stream, we assume it is terminating very soon, and use `finish_command()` from the single external process execution interface to collect the exit status. By maintaining the strong assumption of stderr being open until the very end of a child process, we can avoid other hassle such as an implementation using `waitpid(-1)`, which is not implemented on Windows. Signed-off-by: Stefan Beller <sbeller@google.com> Signed-off-by: Junio C Hamano <gitster@pobox.com>
89 lines
2.0 KiB
C
89 lines
2.0 KiB
C
/*
|
|
* test-run-command.c: test run command API.
|
|
*
|
|
* (C) 2009 Ilari Liusvaara <ilari.liusvaara@elisanet.fi>
|
|
*
|
|
* This code is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
|
|
#include "git-compat-util.h"
#include "run-command.h"
#include "argv-array.h"
#include "strbuf.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
|
|
|
|
static int number_callbacks;
|
|
static int parallel_next(struct child_process *cp,
|
|
struct strbuf *err,
|
|
void *cb,
|
|
void **task_cb)
|
|
{
|
|
struct child_process *d = cb;
|
|
if (number_callbacks >= 4)
|
|
return 0;
|
|
|
|
argv_array_pushv(&cp->args, d->argv);
|
|
strbuf_addf(err, "preloaded output of a child\n");
|
|
number_callbacks++;
|
|
return 1;
|
|
}
|
|
|
|
/*
 * get_next_task callback that never produces any work.  Used to verify
 * that the parallel machinery copes with an immediately empty task
 * queue.  Notes the fact on err and returns 0 ("no more tasks").
 */
static int no_job(struct child_process *cp,
		  struct strbuf *err,
		  void *cb,
		  void **task_cb)
{
	strbuf_addstr(err, "no further jobs available\n");
	return 0;
}
|
|
|
|
/*
 * task_finished callback, invoked after each child exits.  Records a
 * message on err and returns 1, asking the parallel machinery to stop
 * launching further children as soon as possible ("quick stop").
 */
static int task_finished(int result,
			 struct child_process *cp,
			 struct strbuf *err,
			 void *pp_cb,
			 void *pp_task_cb)
{
	strbuf_addstr(err, "asking for a quick stop\n");
	return 1;
}
|
|
|
|
int main(int argc, char **argv)
|
|
{
|
|
struct child_process proc = CHILD_PROCESS_INIT;
|
|
int jobs;
|
|
|
|
if (argc < 3)
|
|
return 1;
|
|
proc.argv = (const char **)argv + 2;
|
|
|
|
if (!strcmp(argv[1], "start-command-ENOENT")) {
|
|
if (start_command(&proc) < 0 && errno == ENOENT)
|
|
return 0;
|
|
fprintf(stderr, "FAIL %s\n", argv[1]);
|
|
return 1;
|
|
}
|
|
if (!strcmp(argv[1], "run-command"))
|
|
exit(run_command(&proc));
|
|
|
|
jobs = atoi(argv[2]);
|
|
proc.argv = (const char **)argv + 3;
|
|
|
|
if (!strcmp(argv[1], "run-command-parallel"))
|
|
exit(run_processes_parallel(jobs, parallel_next,
|
|
NULL, NULL, &proc));
|
|
|
|
if (!strcmp(argv[1], "run-command-abort"))
|
|
exit(run_processes_parallel(jobs, parallel_next,
|
|
NULL, task_finished, &proc));
|
|
|
|
if (!strcmp(argv[1], "run-command-no-jobs"))
|
|
exit(run_processes_parallel(jobs, no_job,
|
|
NULL, task_finished, &proc));
|
|
|
|
fprintf(stderr, "check usage\n");
|
|
return 1;
|
|
}
|