# memory, a pile up of processes could cause thrashing
# otherwise. The fd of the lock is stored in
# IKIWIKI_CGILOCK_FD so unlockwiki can close it.
- $pre_exec=<<"EOF";
+ #
+ # A lot of cgi wrapper processes can potentially build
+ # up and clog an otherwise unloaded web server. To
+ # partially avoid this, when a GET comes in and the lock
+ # is already held, rather than blocking, an html page is
+ # constructed that retries. This is enabled by setting
+ # cgi_overload_delay.
+ if (defined $config{cgi_overload_delay} &&
+ $config{cgi_overload_delay} =~ /^[0-9]+/) {
+ my $i=int($config{cgi_overload_delay});
+ $pre_exec.="#define CGI_OVERLOAD_DELAY $i\n"
+ if $i > 0;
+ }
+ $pre_exec.=<<"EOF";
lockfd=open("$config{wikistatedir}/cgilock", O_CREAT | O_RDWR, 0666);
- if (lockfd != -1 && lockf(lockfd, F_LOCK, 0) == 0) {
- char *fd_s=malloc(8);
- sprintf(fd_s, "%i", lockfd);
- setenv("IKIWIKI_CGILOCK_FD", fd_s, 1);
+ if (lockfd != -1) {
+#ifdef CGI_OVERLOAD_DELAY
+ char *request_method = getenv("REQUEST_METHOD");
+ if (request_method && strcmp(request_method, "GET") == 0) {
+ if (lockf(lockfd, F_TLOCK, 0) == 0) {
+ set_cgilock_fd(lockfd);
+ }
+ else {
+ printf("Content-Type: text/html\\nRefresh: %i; URL=%s\\n\\n<html><head><title>please wait...</title><head><body><p>Please wait ...</p></body></html>",
+ CGI_OVERLOAD_DELAY,
+ getenv("REQUEST_URI"));
+ exit(0);
+ }
+ }
+ else if (lockf(lockfd, F_LOCK, 0) == 0) {
+ set_cgilock_fd(lockfd);
+ }
+#else
+ if (lockf(lockfd, F_LOCK, 0) == 0) {
+ set_cgilock_fd(lockfd);
+ }
+#endif
}
EOF
}
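
Pulled out of the heredoc, the lock logic the generated wrapper compiles
to amounts to roughly the following standalone sketch. The lock path,
the CGI_OVERLOAD_DELAY value, and the exec placeholder are illustrative
assumptions, not what the real wrapper hardcodes (it uses
$config{wikistatedir}/cgilock and the configured delay):

    /* Sketch of the wrapper's overload handling; lock path and
     * delay value are assumed examples, not ikiwiki's defaults. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>

    #define CGI_OVERLOAD_DELAY 5	/* assumed example value */

    static void set_cgilock_fd (int lockfd) {
    	char fd_s[32];
    	snprintf(fd_s, sizeof(fd_s), "%i", lockfd);
    	setenv("IKIWIKI_CGILOCK_FD", fd_s, 1);
    }

    int main (void) {
    	int lockfd=open("/tmp/cgilock", O_CREAT | O_RDWR, 0666);
    	if (lockfd != -1) {
    		char *request_method = getenv("REQUEST_METHOD");
    		if (request_method && strcmp(request_method, "GET") == 0) {
    			/* Try the lock without blocking; on contention,
    			 * emit a page that retries via a Refresh header
    			 * instead of tying up a web server slot. */
    			if (lockf(lockfd, F_TLOCK, 0) == 0) {
    				set_cgilock_fd(lockfd);
    			}
    			else {
    				char *uri = getenv("REQUEST_URI");
    				printf("Content-Type: text/html\nRefresh: %i; URL=%s\n\n"
    				       "<html><head><title>please wait...</title></head>"
    				       "<body><p>Please wait ...</p></body></html>",
    				       CGI_OVERLOAD_DELAY, uri ? uri : "");
    				exit(0);
    			}
    		}
    		/* Other methods still block waiting for the lock. */
    		else if (lockf(lockfd, F_LOCK, 0) == 0) {
    			set_cgilock_fd(lockfd);
    		}
    	}
    	/* ...the real wrapper would exec the ikiwiki CGI here... */
    	return 0;
    }

Only GET falls back to the retry page; POSTs still queue on the lock,
presumably because a Refresh cannot replay a POST body.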
newenviron[i++]=s;
}
+static void set_cgilock_fd (int lockfd) {
+ char fd_s[32];
+ snprintf(fd_s, sizeof(fd_s), "%i", lockfd);
+ setenv("IKIWIKI_CGILOCK_FD", fd_s, 1);
+}
+
+
int main (int argc, char **argv) {
int lockfd=-1;
char *s;
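
The fd stashed in IKIWIKI_CGILOCK_FD is what lets unlockwiki drop the
lock once exclusive access is no longer needed. ikiwiki's actual
unlockwiki is Perl; as a sketch in C, the cleanup contract amounts to:

    /* Sketch of the unlock side (the real unlockwiki is Perl):
     * closing the inherited fd releases the lockf() lock. */
    #include <stdlib.h>
    #include <unistd.h>

    static void unlock_cgilock (void) {
    	char *fd_s = getenv("IKIWIKI_CGILOCK_FD");
    	if (fd_s) {
    		close(atoi(fd_s));
    		unsetenv("IKIWIKI_CGILOCK_FD");
    	}
    }

Since the wrapper execs the real CGI within the same process, the fd
and its lock survive the exec; closing the fd is all it takes to
release the lock.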
* monochrome: New theme, contributed by Jon Dowland.
* rst: Ported to Python 3, while still being valid Python 2.
Thanks, W. Trevor King.
+ * Try to avoid a situation in which so many ikiwiki cgi wrapper programs
+ are running, all waiting on some long-running thing like a site rebuild,
+ that it prevents the web server from doing anything else. The current
+ approach only avoids this problem for GET requests; when a GET arrives
+ while another CGI holds the lock, the wrapper displays a "please wait"
+ page that refreshes after a configurable number of seconds, retrying
+ the request. To enable this protection, set cgi_overload_delay to the
+ number of seconds to wait before retrying. This is not enabled by
+ default.
-- Joey Hess <joeyh@debian.org> Thu, 30 Aug 2012 11:56:12 -0400