From 6995b510759cf531d70745b7d0c6e8a0d9010b06 Mon Sep 17 00:00:00 2001 From: Christopher Odenbach Date: Wed, 31 May 2017 14:15:47 +0200 Subject: repaired "-n" behaviour. If run with "-n ok", a host which had completely run out of swap space would return "ok", which is not desired. It should only return "ok" if there is no swap space configured at all. diff --git a/plugins/check_swap.c b/plugins/check_swap.c index 4d5a407..0ff0c77 100644 --- a/plugins/check_swap.c +++ b/plugins/check_swap.c @@ -51,7 +51,7 @@ const char *email = "devel@monitoring-plugins.org"; # define SWAP_CONVERSION 1 #endif -int check_swap (int usp, float free_swap_mb); +int check_swap (int usp, float free_swap_mb, float total_swap_mb); int process_arguments (int argc, char **argv); int validate_arguments (void); void print_usage (void); @@ -128,7 +128,7 @@ main (int argc, char **argv) percent=100.0; else percent = 100 * (((double) dskused_mb) / ((double) dsktotal_mb)); - result = max_state (result, check_swap (percent, dskfree_mb)); + result = max_state (result, check_swap (percent, dskfree_mb, dsktotal_mb)); if (verbose) xasprintf (&status, "%s [%.0f (%d%%)]", status, dskfree_mb, 100 - percent); } @@ -227,7 +227,7 @@ main (int argc, char **argv) free_swap_mb += dskfree_mb; if (allswaps) { percent = 100 * (((double) dskused_mb) / ((double) dsktotal_mb)); - result = max_state (result, check_swap (percent, dskfree_mb)); + result = max_state (result, check_swap (percent, dskfree_mb, dsktotal_mb)); if (verbose) xasprintf (&status, "%s [%.0f (%d%%)]", status, dskfree_mb, 100 - percent); } @@ -289,7 +289,7 @@ main (int argc, char **argv) if(allswaps && dsktotal_mb > 0){ percent = 100 * (((double) dskused_mb) / ((double) dsktotal_mb)); - result = max_state (result, check_swap (percent, dskfree_mb)); + result = max_state (result, check_swap (percent, dskfree_mb, dsktotal_mb)); if (verbose) { xasprintf (&status, "%s [%.0f (%d%%)]", status, dskfree_mb, 100 - percent); } @@ -328,7 +328,7 @@ main (int argc, char **argv) if(allswaps && dsktotal_mb > 0){ percent = 100 * (((double) dskused_mb) / ((double) dsktotal_mb)); - result = max_state (result, check_swap (percent, dskfree_mb)); + result = max_state (result, check_swap (percent, dskfree_mb, dsktotal_mb)); if (verbose) { xasprintf (&status, "%s [%.0f (%d%%)]", status, dskfree_mb, 100 - percent); } @@ -355,7 +355,7 @@ main (int argc, char **argv) status = "- Swap is either disabled, not present, or of zero size. "; } - result = max_state (result, check_swap (percent_used, free_swap_mb)); + result = max_state (result, check_swap (percent_used, free_swap_mb, total_swap_mb)); printf (_("SWAP %s - %d%% free (%d MB out of %d MB) %s|"), state_text (result), (100 - percent_used), (int) free_swap_mb, (int) total_swap_mb, status); @@ -372,10 +372,10 @@ main (int argc, char **argv) int -check_swap (int usp, float free_swap_mb) +check_swap (int usp, float free_swap_mb, float total_swap_mb) { - if (!free_swap_mb) return no_swap_state; + if (!total_swap_mb) return no_swap_state; int result = STATE_UNKNOWN; float free_swap = free_swap_mb * (1024 * 1024); /* Convert back to bytes as warn and crit specified in bytes */ -- cgit v0.10-9-g596f From 9671399c00fa62a708efb7371eeba64e4e73fd94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20S=C3=A1nchez?= Date: Wed, 6 Sep 2017 16:54:03 +0200 Subject: Adding packages-warning option This option specifies the minimum number of packages available for upgrade to return WARNING status. Default is 1 package.
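A minimal standalone sketch of the intended -w/--packages-warning behaviour (illustrative only, not code from check_apt.c; the helper name apt_state and the plain 0/1/2 return values standing for OK/WARNING/CRITICAL are assumptions made for this example, the plugin itself uses the STATE_* constants and max_state() as the diff below shows):

#include <stdio.h>

/* Sketch of the decision the option controls: pending security upgrades always
 * go CRITICAL, ordinary upgrades only go WARNING once at least
 * packages_warning of them are pending (and -o/only_critical is not set). */
static int apt_state(int packages_available, int sec_count,
                     int packages_warning, int only_critical)
{
    if (sec_count > 0)
        return 2;                 /* CRITICAL */
    if (packages_available >= packages_warning && !only_critical)
        return 1;                 /* WARNING */
    return 0;                     /* OK */
}

int main(void)
{
    printf("%d\n", apt_state(9, 0, 10, 0));   /* 0: nine upgrades with -w 10 -> OK */
    printf("%d\n", apt_state(10, 0, 10, 0));  /* 1: threshold reached -> WARNING */
    printf("%d\n", apt_state(3, 1, 10, 0));   /* 2: a security upgrade -> CRITICAL */
    return 0;
}

With the default of 1, any pending upgrade still raises WARNING, so existing setups keep their behaviour unless -w is given; the key change in the hunk below is the comparison packages_available >= packages_warning replacing packages_available > 0.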
diff --git a/plugins/check_apt.c b/plugins/check_apt.c index b69680c..d7be575 100644 --- a/plugins/check_apt.c +++ b/plugins/check_apt.c @@ -86,6 +86,8 @@ static char *do_include = NULL; /* regexp to only include certain packages */ static char *do_exclude = NULL; /* regexp to only exclude certain packages */ static char *do_critical = NULL; /* regexp specifying critical packages */ static char *input_filename = NULL; /* input filename for testing */ +/* number of packages available for upgrade to return WARNING status */ +static int packages_warning = 1; /* other global variables */ static int stderr_warning = 0; /* if a cmd issued output on stderr */ @@ -117,7 +119,7 @@ int main (int argc, char **argv) { if(sec_count > 0){ result = max_state(result, STATE_CRITICAL); - } else if(packages_available > 0 && only_critical == 0){ + } else if(packages_available >= packages_warning && only_critical == 0){ result = max_state(result, STATE_WARNING); } else if(result > STATE_UNKNOWN){ result = STATE_UNKNOWN; @@ -170,11 +172,12 @@ int process_arguments (int argc, char **argv) { {"critical", required_argument, 0, 'c'}, {"only-critical", no_argument, 0, 'o'}, {"input-file", required_argument, 0, INPUT_FILE_OPT}, + {"packages-warning", required_argument, 0, 'w'}, {0, 0, 0, 0} }; while(1) { - c = getopt_long(argc, argv, "hVvt:u::U::d::nli:e:c:o", longopts, NULL); + c = getopt_long(argc, argv, "hVvt:u::U::d::nli:e:c:ow:", longopts, NULL); if(c == -1 || c == EOF || c == 1) break; @@ -233,6 +236,9 @@ int process_arguments (int argc, char **argv) { case INPUT_FILE_OPT: input_filename = optarg; break; + case 'w': + packages_warning = atoi(optarg); + break; default: /* print short usage statement if args not parsable */ usage5(); @@ -530,7 +536,10 @@ print_help (void) printf (" %s\n", "-o, --only-critical"); printf (" %s\n", _("Only warn about upgrades matching the critical list. The total number")); printf (" %s\n", _("of upgrades will be printed, but any non-critical upgrades will not cause")); - printf (" %s\n\n", _("the plugin to return WARNING status.")); + printf (" %s\n", _("the plugin to return WARNING status.")); + printf (" %s\n", "-w, --packages-warning"); + printf (" %s\n", _("Minumum number of packages available for upgrade to return WARNING status.")); + printf (" %s\n\n", _("Default is 1 package.")); printf ("%s\n\n", _("The following options require root privileges and should be used with care:")); printf (" %s\n", "-u, --update=OPTS"); @@ -548,5 +557,5 @@ void print_usage(void) { printf ("%s\n", _("Usage:")); - printf ("%s [[-d|-u|-U]opts] [-n] [-l] [-t timeout]\n", progname); + printf ("%s [[-d|-u|-U]opts] [-n] [-l] [-t timeout] [-w packages-warning]\n", progname); } -- cgit v0.10-9-g596f From 1410ffff281bc82d423459198cb4ea3f393b1e92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20S=C3=A1nchez?= Date: Thu, 7 Sep 2017 13:31:01 +0200 Subject: Adding print top consuming processes option to check_load -W, --print-top-warning Print top consuming processes on WARNING status -C, --print-top-critical Print top consuming processes on CRITICAL status -n, --procs-to-show=NUMBER_OF_PROCS Number of processes to show when printing top consuming processes. Not useful without -W or -C. 
Default value is 5 diff --git a/plugins/check_load.c b/plugins/check_load.c index b1cc498..b6cfc65 100644 --- a/plugins/check_load.c +++ b/plugins/check_load.c @@ -33,6 +33,7 @@ const char *copyright = "1999-2007"; const char *email = "devel@monitoring-plugins.org"; #include "common.h" +#include "runcmd.h" #include "utils.h" #include "popen.h" @@ -52,6 +53,11 @@ static int process_arguments (int argc, char **argv); static int validate_arguments (void); void print_help (void); void print_usage (void); +static int print_top_consuming_processes(); + +static int n_procs_to_show = 5; +static int print_top_procs_on_warning = 0; +static int print_top_procs_on_critical = 0; /* strictly for pretty-print usage in loops */ static const int nums[3] = { 1, 5, 15 }; @@ -210,6 +216,11 @@ main (int argc, char **argv) printf("load%d=%.3f;%.3f;%.3f;0; ", nums[i], la[i], wload[i], cload[i]); putchar('\n'); + if (result == STATE_CRITICAL && print_top_procs_on_critical) { + print_top_consuming_processes(); + } else if (result == STATE_WARNING && print_top_procs_on_warning) { + print_top_consuming_processes(); + } return result; } @@ -227,6 +238,9 @@ process_arguments (int argc, char **argv) {"percpu", no_argument, 0, 'r'}, {"version", no_argument, 0, 'V'}, {"help", no_argument, 0, 'h'}, + {"print-top-warning", no_argument, 0, 'W'}, + {"print-top-critical", no_argument, 0, 'C'}, + {"procs-to-show", required_argument, 0, 'n'}, {0, 0, 0, 0} }; @@ -234,7 +248,7 @@ process_arguments (int argc, char **argv) return ERROR; while (1) { - c = getopt_long (argc, argv, "Vhrc:w:", longopts, &option); + c = getopt_long (argc, argv, "Vhrc:w:WCn:", longopts, &option); if (c == -1 || c == EOF) break; @@ -255,6 +269,15 @@ process_arguments (int argc, char **argv) case 'h': /* help */ print_help (); exit (STATE_UNKNOWN); + case 'n': + n_procs_to_show = atoi(optarg); + break; + case 'W': + print_top_procs_on_warning = 1; + break; + case 'C': + print_top_procs_on_critical = 1; + break; case '?': /* help */ usage5 (); } @@ -324,6 +347,13 @@ print_help (void) printf (" %s\n", _("the load average format is the same used by \"uptime\" and \"w\"")); printf (" %s\n", "-r, --percpu"); printf (" %s\n", _("Divide the load averages by the number of CPUs (when possible)")); + printf (" %s\n", "-W, --print-top-warning"); + printf (" %s\n", _("Print top consuming processes on WARNING status")); + printf (" %s\n", "-C, --print-top-critical"); + printf (" %s\n", _("Print top consuming processes on CRITICAL status")); + printf (" %s\n", "-n, --procs-to-show=NUMBER_OF_PROCS"); + printf (" %s\n", _("Number of processes to show when printing top consuming")); + printf (" %s\n", _("processes. Not useful without -W or -C. Default value is 5")); printf (UT_SUPPORT); } @@ -332,5 +362,21 @@ void print_usage (void) { printf ("%s\n", _("Usage:")); - printf ("%s [-r] -w WLOAD1,WLOAD5,WLOAD15 -c CLOAD1,CLOAD5,CLOAD15\n", progname); + printf ("%s [-r] -w WLOAD1,WLOAD5,WLOAD15 -c CLOAD1,CLOAD5,CLOAD15 [-W] [-C] [-n NUMBER_OF_PROCS]\n", progname); +} + +static int print_top_consuming_processes() { + int i = 0; + struct output chld_out, chld_err; + char *cmdline = "/bin/ps -aux --sort=-pcpu"; + if(np_runcmd(cmdline, &chld_out, &chld_err, 0) != 0){ + fprintf(stderr, _("'%s' exited with non-zero status.\n"), cmdline); + return STATE_UNKNOWN; + } + int lines_to_show = chld_out.lines < (n_procs_to_show + 1) + ? 
chld_out.lines : n_procs_to_show + 1; + for (i = 0; i < lines_to_show; i += 1) { + printf("%s\n", chld_out.line[i]); + } + return OK; } -- cgit v0.10-9-g596f From 0625fbb53db9401bdce95b1a403a20980a326ec4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20S=C3=A1nchez?= Date: Thu, 7 Sep 2017 13:55:19 +0200 Subject: Making show top consuming processes option less complicated diff --git a/plugins/check_load.c b/plugins/check_load.c index b6cfc65..5d5c115 100644 --- a/plugins/check_load.c +++ b/plugins/check_load.c @@ -55,9 +55,7 @@ void print_help (void); void print_usage (void); static int print_top_consuming_processes(); -static int n_procs_to_show = 5; -static int print_top_procs_on_warning = 0; -static int print_top_procs_on_critical = 0; +static int n_procs_to_show = 0; /* strictly for pretty-print usage in loops */ static const int nums[3] = { 1, 5, 15 }; @@ -216,9 +214,7 @@ main (int argc, char **argv) printf("load%d=%.3f;%.3f;%.3f;0; ", nums[i], la[i], wload[i], cload[i]); putchar('\n'); - if (result == STATE_CRITICAL && print_top_procs_on_critical) { - print_top_consuming_processes(); - } else if (result == STATE_WARNING && print_top_procs_on_warning) { + if (n_procs_to_show > 0) { print_top_consuming_processes(); } return result; @@ -238,8 +234,6 @@ process_arguments (int argc, char **argv) {"percpu", no_argument, 0, 'r'}, {"version", no_argument, 0, 'V'}, {"help", no_argument, 0, 'h'}, - {"print-top-warning", no_argument, 0, 'W'}, - {"print-top-critical", no_argument, 0, 'C'}, {"procs-to-show", required_argument, 0, 'n'}, {0, 0, 0, 0} }; @@ -248,7 +242,7 @@ process_arguments (int argc, char **argv) return ERROR; while (1) { - c = getopt_long (argc, argv, "Vhrc:w:WCn:", longopts, &option); + c = getopt_long (argc, argv, "Vhrc:w:n:", longopts, &option); if (c == -1 || c == EOF) break; @@ -272,12 +266,6 @@ process_arguments (int argc, char **argv) case 'n': n_procs_to_show = atoi(optarg); break; - case 'W': - print_top_procs_on_warning = 1; - break; - case 'C': - print_top_procs_on_critical = 1; - break; case '?': /* help */ usage5 (); } @@ -347,13 +335,9 @@ print_help (void) printf (" %s\n", _("the load average format is the same used by \"uptime\" and \"w\"")); printf (" %s\n", "-r, --percpu"); printf (" %s\n", _("Divide the load averages by the number of CPUs (when possible)")); - printf (" %s\n", "-W, --print-top-warning"); - printf (" %s\n", _("Print top consuming processes on WARNING status")); - printf (" %s\n", "-C, --print-top-critical"); - printf (" %s\n", _("Print top consuming processes on CRITICAL status")); printf (" %s\n", "-n, --procs-to-show=NUMBER_OF_PROCS"); - printf (" %s\n", _("Number of processes to show when printing top consuming")); - printf (" %s\n", _("processes. Not useful without -W or -C. Default value is 5")); + printf (" %s\n", _("Number of processes to show when printing the top consuming processes.")); + printf (" %s\n", _("NUMBER_OF_PROCS=0 disables this feature. 
Default value is 0")); printf (UT_SUPPORT); } @@ -362,7 +346,7 @@ void print_usage (void) { printf ("%s\n", _("Usage:")); - printf ("%s [-r] -w WLOAD1,WLOAD5,WLOAD15 -c CLOAD1,CLOAD5,CLOAD15 [-W] [-C] [-n NUMBER_OF_PROCS]\n", progname); + printf ("%s [-r] -w WLOAD1,WLOAD5,WLOAD15 -c CLOAD1,CLOAD5,CLOAD15 [-n NUMBER_OF_PROCS]\n", progname); } static int print_top_consuming_processes() { -- cgit v0.10-9-g596f From 015a40e1b590bb847328d51bdfc4b544ae8825d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20S=C3=A1nchez?= Date: Thu, 7 Sep 2017 20:55:34 +0200 Subject: Using PS_COMMAND constant and ordering output by procpcpu diff --git a/plugins/check_load.c b/plugins/check_load.c index 5d5c115..6fd895f 100644 --- a/plugins/check_load.c +++ b/plugins/check_load.c @@ -349,14 +349,37 @@ print_usage (void) printf ("%s [-r] -w WLOAD1,WLOAD5,WLOAD15 -c CLOAD1,CLOAD5,CLOAD15 [-n NUMBER_OF_PROCS]\n", progname); } +int cmpstringp(const void *p1, const void *p2) { + int procuid = 0; + int procpid = 0; + int procppid = 0; + int procvsz = 0; + int procrss = 0; + float procpcpu = 0; + char procstat[8]; +#ifdef PS_USES_PROCETIME + char procetime[MAX_INPUT_BUFFER]; +#endif /* PS_USES_PROCETIME */ + char procprog[MAX_INPUT_BUFFER]; + int pos; + sscanf (* (char * const *) p1, PS_FORMAT, PS_VARLIST); + float procpcpu1 = procpcpu; + sscanf (* (char * const *) p2, PS_FORMAT, PS_VARLIST); + return procpcpu1 < procpcpu; +} + static int print_top_consuming_processes() { int i = 0; struct output chld_out, chld_err; - char *cmdline = "/bin/ps -aux --sort=-pcpu"; - if(np_runcmd(cmdline, &chld_out, &chld_err, 0) != 0){ - fprintf(stderr, _("'%s' exited with non-zero status.\n"), cmdline); + if(np_runcmd(PS_COMMAND, &chld_out, &chld_err, 0) != 0){ + fprintf(stderr, _("'%s' exited with non-zero status.\n"), PS_COMMAND); + return STATE_UNKNOWN; + } + if (chld_out.lines < 2) { + fprintf(stderr, _("some error occurred getting procs list.\n")); return STATE_UNKNOWN; } + qsort(chld_out.line + 1, chld_out.lines - 1, sizeof(char*), cmpstringp); int lines_to_show = chld_out.lines < (n_procs_to_show + 1) ? 
chld_out.lines : n_procs_to_show + 1; for (i = 0; i < lines_to_show; i += 1) { -- cgit v0.10-9-g596f From c03e1ad081bc080cb8085bc14a94e4965a8e119e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20S=C3=A1nchez?= Date: Thu, 7 Sep 2017 22:25:09 +0200 Subject: Only turn on ordering procs by CPU usage if PS_USES_PROCPCPU Disable sorting of procs by CPU usage on check_load if procpcpu is not present on PS_VARLIST diff --git a/configure.ac b/configure.ac index bf12995..08a0e78 100644 --- a/configure.ac +++ b/configure.ac @@ -1016,6 +1016,10 @@ if test -n "$ac_cv_ps_varlist" ; then AC_DEFINE(PS_USES_PROCETIME,"yes", [Whether the ps utility uses the "procetime" field]) fi + if echo "$ac_cv_ps_varlist" | grep "procpcpu" >/dev/null; then + AC_DEFINE(PS_USES_PROCPCPU,"yes", + [Whether the ps utility uses the "procpcpu" field]) + fi fi AC_PATH_PROG(PATH_TO_PING,ping) diff --git a/plugins/check_load.c b/plugins/check_load.c index 6fd895f..bf7b94b 100644 --- a/plugins/check_load.c +++ b/plugins/check_load.c @@ -349,6 +349,7 @@ print_usage (void) printf ("%s [-r] -w WLOAD1,WLOAD5,WLOAD15 -c CLOAD1,CLOAD5,CLOAD15 [-n NUMBER_OF_PROCS]\n", progname); } +#ifdef PS_USES_PROCPCPU int cmpstringp(const void *p1, const void *p2) { int procuid = 0; int procpid = 0; @@ -367,6 +368,7 @@ int cmpstringp(const void *p1, const void *p2) { sscanf (* (char * const *) p2, PS_FORMAT, PS_VARLIST); return procpcpu1 < procpcpu; } +#endif /* PS_USES_PROCPCPU */ static int print_top_consuming_processes() { int i = 0; @@ -379,7 +381,9 @@ static int print_top_consuming_processes() { fprintf(stderr, _("some error occurred getting procs list.\n")); return STATE_UNKNOWN; } +#ifdef PS_USES_PROCPCPU qsort(chld_out.line + 1, chld_out.lines - 1, sizeof(char*), cmpstringp); +#endif /* PS_USES_PROCPCPU */ int lines_to_show = chld_out.lines < (n_procs_to_show + 1) ? 
chld_out.lines : n_procs_to_show + 1; for (i = 0; i < lines_to_show; i += 1) { -- cgit v0.10-9-g596f From 9fa449119ccd9f57df9e12d4597804efd2dee3ba Mon Sep 17 00:00:00 2001 From: Michael Kraus Date: Fri, 19 Jan 2018 15:13:12 +0100 Subject: check_snmp: make calculation of timeout value in help output more clear diff --git a/plugins/check_snmp.c b/plugins/check_snmp.c index da9638c..88b8f38 100644 --- a/plugins/check_snmp.c +++ b/plugins/check_snmp.c @@ -1207,8 +1207,9 @@ print_help (void) printf (" %s\n", _("Separates output on multiple OID requests")); printf (UT_CONN_TIMEOUT, DEFAULT_SOCKET_TIMEOUT); + printf (" %s\n", _("NOTE the final timeout value is calculated using this formula: timeout_interval * retries + 5")); printf (" %s\n", "-e, --retries=INTEGER"); - printf (" %s\n", _("Number of retries to be used in the requests")); + printf (" %s%i%s\n", _("Number of retries to be used in the requests (default: "), DEFAULT_RETRIES, _(")")); printf (" %s\n", "-O, --perf-oids"); printf (" %s\n", _("Label performance data with OIDs instead of --label's")); -- cgit v0.10-9-g596f From b7de6ec8bd4a2df63295734d8d5af2401830b1c2 Mon Sep 17 00:00:00 2001 From: Sven Nierlein Date: Fri, 19 Jan 2018 17:05:46 +0100 Subject: Update check_snmp.c remove unnecessary brackets in help text diff --git a/plugins/check_snmp.c b/plugins/check_snmp.c index 88b8f38..e8a21a4 100644 --- a/plugins/check_snmp.c +++ b/plugins/check_snmp.c @@ -1209,7 +1209,7 @@ print_help (void) printf (UT_CONN_TIMEOUT, DEFAULT_SOCKET_TIMEOUT); printf (" %s\n", _("NOTE the final timeout value is calculated using this formula: timeout_interval * retries + 5")); printf (" %s\n", "-e, --retries=INTEGER"); - printf (" %s%i%s\n", _("Number of retries to be used in the requests (default: "), DEFAULT_RETRIES, _(")")); + printf (" %s%i\n", _("Number of retries to be used in the requests, default: "), DEFAULT_RETRIES); printf (" %s\n", "-O, --perf-oids"); printf (" %s\n", _("Label performance data with OIDs instead of --label's")); -- cgit v0.10-9-g596f From e60c6c04f04022a72bf3a1074402342e948c8c0f Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Wed, 25 Jul 2018 19:05:16 +0200 Subject: check_dns: simplify loop diff --git a/plugins/check_dns.c b/plugins/check_dns.c index f206163..ae4123b 100644 --- a/plugins/check_dns.c +++ b/plugins/check_dns.c @@ -168,8 +168,8 @@ main (int argc, char **argv) temp_buffer++; /* Strip leading spaces */ - for (; *temp_buffer != '\0' && *temp_buffer == ' '; temp_buffer++) - /* NOOP */; + while (*temp_buffer == ' ') + temp_buffer++; strip(temp_buffer); if (temp_buffer==NULL || strlen(temp_buffer)==0) { -- cgit v0.10-9-g596f From a03068743f3694cbdbe84bb6e802a41f270cc105 Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Wed, 25 Jul 2018 20:18:47 +0200 Subject: check_dns: allow returned addresses to be in any order diff --git a/NEWS b/NEWS index 2db2a2c..ac06aa6 100644 --- a/NEWS +++ b/NEWS @@ -5,6 +5,7 @@ This file documents the major additions and syntax changes between releases. check_dns: allow 'expected address' (-a) to be specified in CIDR notation (IPv4 only).
check_dns: allow for IPv6 RDNS + check_dns: allow unsorted addresses check_apt: add --only-critical switch check_apt: add -l/--list option to print packages diff --git a/THANKS.in b/THANKS.in index ebc8155..9bb4382 100644 --- a/THANKS.in +++ b/THANKS.in @@ -356,3 +356,4 @@ Sven Geggus Thomas Kurschel Yannick Charton Nicolai Søborg +Rolf Eike Beer diff --git a/plugins/check_dns.c b/plugins/check_dns.c index ae4123b..75a9dca 100644 --- a/plugins/check_dns.c +++ b/plugins/check_dns.c @@ -230,10 +230,15 @@ main (int argc, char **argv) temp_buffer = ""; for (i=0; i Date: Wed, 25 Jul 2018 21:14:47 +0200 Subject: check_dns: allow forcing complete match of all addresses diff --git a/NEWS b/NEWS index ac06aa6..0848705 100644 --- a/NEWS +++ b/NEWS @@ -6,6 +6,7 @@ This file documents the major additions and syntax changes between releases. (IPv4 only). check_dns: allow for IPv6 RDNS check_dns: allow unsorted addresses + check_dns: allow forcing complete match of all addresses check_apt: add --only-critical switch check_apt: add -l/--list option to print packages diff --git a/plugins/check_dns.c b/plugins/check_dns.c index 75a9dca..25bd31d 100644 --- a/plugins/check_dns.c +++ b/plugins/check_dns.c @@ -56,6 +56,7 @@ char **expected_address = NULL; int expected_address_cnt = 0; int expect_authority = FALSE; +int all_match = FALSE; thresholds *time_thresholds = NULL; static int @@ -228,6 +229,8 @@ main (int argc, char **argv) if (result == STATE_OK && expected_address_cnt > 0) { result = STATE_CRITICAL; temp_buffer = ""; + unsigned long expect_match = (1 << expected_address_cnt) - 1; + unsigned long addr_match = (1 << n_addresses) - 1; for (i=0; i Date: Wed, 31 Oct 2018 15:22:50 +0100 Subject: Allow user to specify HTTP method after proxy CONNECT Simple format, avoids refactoring the CONNECT feature. diff --git a/plugins/check_http.c b/plugins/check_http.c index 86a36c2..1e2a54c 100644 --- a/plugins/check_http.c +++ b/plugins/check_http.c @@ -126,6 +126,7 @@ int max_page_len = 0; int redir_depth = 0; int max_depth = 15; char *http_method; +char *http_method_proxy; char *http_post_data; char *http_content_type; char buffer[MAX_INPUT_BUFFER]; @@ -446,6 +447,12 @@ process_arguments (int argc, char **argv) if (http_method) free(http_method); http_method = strdup (optarg); + char *tmp; + if ((tmp = strstr(http_method, ":")) > 0) { + tmp[0] = '\0'; + http_method = http_method; + http_method_proxy = ++tmp; + } break; case 'd': /* string or substring */ strncpy (header_expect, optarg, MAX_INPUT_BUFFER - 1); @@ -566,6 +573,9 @@ process_arguments (int argc, char **argv) if (http_method == NULL) http_method = strdup ("GET"); + if (http_method_proxy == NULL) + http_method_proxy = strdup ("GET"); + if (client_cert && !client_privkey) usage4 (_("If you use a client certificate you must also specify a private key file")); @@ -950,7 +960,7 @@ check_http (void) if ( server_address != NULL && strcmp(http_method, "CONNECT") == 0 && host_name != NULL && use_ssl == TRUE) - asprintf (&buf, "%s %s %s\r\n%s\r\n", "GET", server_url, host_name ? "HTTP/1.1" : "HTTP/1.0", user_agent); + asprintf (&buf, "%s %s %s\r\n%s\r\n", http_method_proxy, server_url, host_name ? "HTTP/1.1" : "HTTP/1.0", user_agent); else asprintf (&buf, "%s %s %s\r\n%s\r\n", http_method, server_url, host_name ? 
"HTTP/1.1" : "HTTP/1.0", user_agent); @@ -1581,7 +1591,7 @@ print_help (void) printf (" %s\n", _("URL to GET or POST (default: /)")); printf (" %s\n", "-P, --post=STRING"); printf (" %s\n", _("URL encoded http POST data")); - printf (" %s\n", "-j, --method=STRING (for example: HEAD, OPTIONS, TRACE, PUT, DELETE, CONNECT)"); + printf (" %s\n", "-j, --method=STRING (for example: HEAD, OPTIONS, TRACE, PUT, DELETE, CONNECT, CONNECT:POST)"); printf (" %s\n", _("Set HTTP method.")); printf (" %s\n", "-N, --no-body"); printf (" %s\n", _("Don't wait for document body: stop reading after headers.")); @@ -1668,7 +1678,8 @@ print_help (void) printf (" %s\n", _("all these options are needed: -I -p -u -S(sl) -j CONNECT -H ")); printf (" %s\n", _("a STATE_OK will be returned. When the server returns its content but exceeds")); printf (" %s\n", _("the 5-second threshold, a STATE_WARNING will be returned. When an error occurs,")); - printf (" %s\n", _("a STATE_CRITICAL will be returned.")); + printf (" %s\n", _("a STATE_CRITICAL will be returned. By adding a colon to the method you can set the method used")); + printf (" %s\n", _("inside the proxied connection: -j CONNECT:POST")); #endif -- cgit v0.10-9-g596f From 198611a3c2bd89562af87d8bf0c7584fce1ce037 Mon Sep 17 00:00:00 2001 From: Markus Frosch Date: Tue, 6 Nov 2018 11:32:52 +0100 Subject: test: Add proxy tests via proxy On Travis with a local squid diff --git a/.travis.yml b/.travis.yml index 617c415..123e178 100644 --- a/.travis.yml +++ b/.travis.yml @@ -55,6 +55,7 @@ install: - sudo apt-get install -qq --no-install-recommends autoconf automake - sudo apt-get install -qq --no-install-recommends faketime - sudo apt-get install -qq --no-install-recommends libmonitoring-plugin-perl + - sudo apt-get install -qq --no-install-recommends squid3 # Trusty related dependencies (not yet provided) - test "$(dpkg -l | grep -E "mysql-(client|server)-[0-9].[0-9]" | grep -c ^ii)" -gt 0 || sudo apt-get install -qq --no-install-recommends mariadb-client mariadb-server # enable ssl apache @@ -62,6 +63,8 @@ install: - sudo a2ensite default-ssl - sudo make-ssl-cert generate-default-snakeoil --force-overwrite - sudo service apache2 reload + - sudo cp tools/squid.conf /etc/squid3/squid.conf + - sudo service squid3 reload before_script: # ensure we have a test database in place for tests @@ -93,4 +96,3 @@ notifications: skip_join: true email: # - team@monitoring-plugins.org - diff --git a/plugins/t/NPTest.cache.travis b/plugins/t/NPTest.cache.travis index e9705f3..28437a0 100644 --- a/plugins/t/NPTest.cache.travis +++ b/plugins/t/NPTest.cache.travis @@ -59,4 +59,6 @@ 'host_udp_time' => 'none', 'host_tls_http' => 'localhost', 'host_tls_cert' => 'localhost', + 'NP_HOST_TCP_PROXY' => 'localhost', + 'NP_PORT_TCP_PROXY' => '3128', } diff --git a/plugins/t/check_http.t b/plugins/t/check_http.t index 8bd484a..416fbbc 100644 --- a/plugins/t/check_http.t +++ b/plugins/t/check_http.t @@ -9,7 +9,7 @@ use Test::More; use POSIX qw/mktime strftime/; use NPTest; -plan tests => 49; +plan tests => 55; my $successOutput = '/OK.*HTTP.*second/'; @@ -42,6 +42,14 @@ my $host_tcp_http2 = getTestParameter( "NP_HOST_TCP_HTTP2", "A host providing an index page containing the string 'monitoring'", "test.monitoring-plugins.org" ); +my $host_tcp_proxy = getTestParameter( "NP_HOST_TCP_PROXY", + "A host providing a HTTP proxy with CONNECT support", + "localhost"); + +my $port_tcp_proxy = getTestParameter( "NP_PORT_TCP_PROXY", + "Port of the proxy with HTTP and CONNECT support", + "3128"); + my $faketime 
= -x '/usr/bin/faketime' ? 1 : 0; @@ -198,3 +206,19 @@ SKIP: { $res = NPTest->testCmd( "./check_http -H www.mozilla.com --extended-perfdata" ); like ( $res->output, '/time_connect=[\d\.]+/', 'Extended Performance Data Output OK' ); } + +SKIP: { + skip "No internet access or proxy configured", 6 if $internet_access eq "no" or ! $host_tcp_proxy; + + $res = NPTest->testCmd( "./check_http -I $host_tcp_proxy -p $port_tcp_proxy -u http://$host_tcp_http -e 200,301,302"); + is( $res->return_code, 0, "Proxy HTTP works"); + like($res->output, qr/OK: Status line output matched/, "Proxy HTTP Output is sufficent"); + + $res = NPTest->testCmd( "./check_http -I $host_tcp_proxy -p $port_tcp_proxy -H $host_tls_http -S -j CONNECT"); + is( $res->return_code, 0, "Proxy HTTP CONNECT works"); + like($res->output, qr/HTTP OK:/, "Proxy HTTP CONNECT output sufficent"); + + $res = NPTest->testCmd( "./check_http -I $host_tcp_proxy -p $port_tcp_proxy -H $host_tls_http -S -j CONNECT:HEAD"); + is( $res->return_code, 0, "Proxy HTTP CONNECT works with override method"); + like($res->output, qr/HTTP OK:/, "Proxy HTTP CONNECT output sufficent"); +} diff --git a/tools/squid.conf b/tools/squid.conf new file mode 100644 index 0000000..bed7a58 --- /dev/null +++ b/tools/squid.conf @@ -0,0 +1,7979 @@ +# WELCOME TO SQUID 3.5.27 +# ---------------------------- +# +# This is the documentation for the Squid configuration file. +# This documentation can also be found online at: +# http://www.squid-cache.org/Doc/config/ +# +# You may wish to look at the Squid home page and wiki for the +# FAQ and other documentation: +# http://www.squid-cache.org/ +# http://wiki.squid-cache.org/SquidFaq +# http://wiki.squid-cache.org/ConfigExamples +# +# This documentation shows what the defaults for various directives +# happen to be. If you don't need to change the default, you should +# leave the line out of your squid.conf in most cases. +# +# In some cases "none" refers to no default setting at all, +# while in other cases it refers to the value of the option +# - the comments for that keyword indicate if this is the case. +# + +# Configuration options can be included using the "include" directive. +# Include takes a list of files to include. Quoting and wildcards are +# supported. +# +# For example, +# +# include /path/to/included/file/squid.acl.config +# +# Includes can be nested up to a hard-coded depth of 16 levels. +# This arbitrary restriction is to prevent recursive include references +# from causing Squid entering an infinite loop whilst trying to load +# configuration files. +# +# Values with byte units +# +# Squid accepts size units on some size related directives. All +# such directives are documented with a default value displaying +# a unit. +# +# Units accepted by Squid are: +# bytes - byte +# KB - Kilobyte (1024 bytes) +# MB - Megabyte +# GB - Gigabyte +# +# Values with spaces, quotes, and other special characters +# +# Squid supports directive parameters with spaces, quotes, and other +# special characters. Surround such parameters with "double quotes". Use +# the configuration_includes_quoted_values directive to enable or +# disable that support. +# +# Squid supports reading configuration option parameters from external +# files using the syntax: +# parameters("/path/filename") +# For example: +# acl whitelist dstdomain parameters("/etc/squid/whitelist.txt") +# +# Conditional configuration +# +# If-statements can be used to make configuration directives +# depend on conditions: +# +# if +# ... regular configuration directives ... 
+# [else +# ... regular configuration directives ...] +# endif +# +# The else part is optional. The keywords "if", "else", and "endif" +# must be typed on their own lines, as if they were regular +# configuration directives. +# +# NOTE: An else-if condition is not supported. +# +# These individual conditions types are supported: +# +# true +# Always evaluates to true. +# false +# Always evaluates to false. +# = +# Equality comparison of two integer numbers. +# +# +# SMP-Related Macros +# +# The following SMP-related preprocessor macros can be used. +# +# ${process_name} expands to the current Squid process "name" +# (e.g., squid1, squid2, or cache1). +# +# ${process_number} expands to the current Squid process +# identifier, which is an integer number (e.g., 1, 2, 3) unique +# across all Squid processes of the current service instance. +# +# ${service_name} expands into the current Squid service instance +# name identifier which is provided by -n on the command line. +# + +# TAG: broken_vary_encoding +# This option is not yet supported by Squid-3. +#Default: +# none + +# TAG: cache_vary +# This option is not yet supported by Squid-3. +#Default: +# none + +# TAG: error_map +# This option is not yet supported by Squid-3. +#Default: +# none + +# TAG: external_refresh_check +# This option is not yet supported by Squid-3. +#Default: +# none + +# TAG: location_rewrite_program +# This option is not yet supported by Squid-3. +#Default: +# none + +# TAG: refresh_stale_hit +# This option is not yet supported by Squid-3. +#Default: +# none + +# TAG: hierarchy_stoplist +# Remove this line. Use always_direct or cache_peer_access ACLs instead if you need to prevent cache_peer use. +#Default: +# none + +# TAG: log_access +# Remove this line. Use acls with access_log directives to control access logging +#Default: +# none + +# TAG: log_icap +# Remove this line. Use acls with icap_log directives to control icap logging +#Default: +# none + +# TAG: ignore_ims_on_miss +# Remove this line. The HTTP/1.1 feature is now configured by 'cache_miss_revalidate'. +#Default: +# none + +# TAG: chunked_request_body_max_size +# Remove this line. Squid is now HTTP/1.1 compliant. +#Default: +# none + +# TAG: dns_v4_fallback +# Remove this line. Squid performs a 'Happy Eyeballs' algorithm, the 'fallback' algorithm is no longer relevant. +#Default: +# none + +# TAG: emulate_httpd_log +# Replace this with an access_log directive using the format 'common' or 'combined'. +#Default: +# none + +# TAG: forward_log +# Use a regular access.log with ACL limiting it to MISS events. +#Default: +# none + +# TAG: ftp_list_width +# Remove this line. Configure FTP page display using the CSS controls in errorpages.css instead. +#Default: +# none + +# TAG: ignore_expect_100 +# Remove this line. The HTTP/1.1 feature is now fully supported by default. +#Default: +# none + +# TAG: log_fqdn +# Remove this option from your config. To log FQDN use %>A in the log format. +#Default: +# none + +# TAG: log_ip_on_direct +# Remove this option from your config. 
To log server or peer names use % +##auth_param negotiate children 20 startup=0 idle=1 +##auth_param negotiate keep_alive on +## +##auth_param digest program +##auth_param digest children 20 startup=0 idle=1 +##auth_param digest realm Squid proxy-caching web server +##auth_param digest nonce_garbage_interval 5 minutes +##auth_param digest nonce_max_duration 30 minutes +##auth_param digest nonce_max_count 50 +## +##auth_param ntlm program +##auth_param ntlm children 20 startup=0 idle=1 +##auth_param ntlm keep_alive on +## +##auth_param basic program +##auth_param basic children 5 startup=5 idle=1 +##auth_param basic realm Squid proxy-caching web server +##auth_param basic credentialsttl 2 hours +#Default: +# none + +# TAG: authenticate_cache_garbage_interval +# The time period between garbage collection across the username cache. +# This is a trade-off between memory utilization (long intervals - say +# 2 days) and CPU (short intervals - say 1 minute). Only change if you +# have good reason to. +#Default: +# authenticate_cache_garbage_interval 1 hour + +# TAG: authenticate_ttl +# The time a user & their credentials stay in the logged in +# user cache since their last request. When the garbage +# interval passes, all user credentials that have passed their +# TTL are removed from memory. +#Default: +# authenticate_ttl 1 hour + +# TAG: authenticate_ip_ttl +# If you use proxy authentication and the 'max_user_ip' ACL, +# this directive controls how long Squid remembers the IP +# addresses associated with each user. Use a small value +# (e.g., 60 seconds) if your users might change addresses +# quickly, as is the case with dialup. You might be safe +# using a larger value (e.g., 2 hours) in a corporate LAN +# environment with relatively static address assignments. +#Default: +# authenticate_ip_ttl 1 second + +# ACCESS CONTROLS +# ----------------------------------------------------------------------------- + +# TAG: external_acl_type +# This option defines external acl classes using a helper program +# to look up the status +# +# external_acl_type name [options] FORMAT.. /path/to/helper [helper arguments..] +# +# Options: +# +# ttl=n TTL in seconds for cached results (defaults to 3600 +# for 1 hour) +# +# negative_ttl=n +# TTL for cached negative lookups (default same +# as ttl) +# +# grace=n Percentage remaining of TTL where a refresh of a +# cached entry should be initiated without needing to +# wait for a new reply. (default is for no grace period) +# +# cache=n The maximum number of entries in the result cache. The +# default limit is 262144 entries. Each cache entry usually +# consumes at least 256 bytes. Squid currently does not remove +# expired cache entries until the limit is reached, so a proxy +# will sooner or later reach the limit. The expanded FORMAT +# value is used as the cache key, so if the details in FORMAT +# are highly variable, a larger cache may be needed to produce +# reduction in helper load. +# +# children-max=n +# Maximum number of acl helper processes spawned to service +# external acl lookups of this type. (default 5) +# +# children-startup=n +# Minimum number of acl helper processes to spawn during +# startup and reconfigure to service external acl lookups +# of this type. (default 0) +# +# children-idle=n +# Number of acl helper processes to keep ahead of traffic +# loads. Squid will spawn this many at once whenever load +# rises above the capabilities of existing processes. +# Up to the value of children-max. 
(default 1) +# +# concurrency=n concurrency level per process. Only used with helpers +# capable of processing more than one query at a time. +# +# protocol=2.5 Compatibility mode for Squid-2.5 external acl helpers. +# +# ipv4 / ipv6 IP protocol used to communicate with this helper. +# The default is to auto-detect IPv6 and use it when available. +# +# +# FORMAT specifications +# +# %LOGIN Authenticated user login name +# %un A user name. Expands to the first available name +# from the following list of information sources: +# - authenticated user name, like %ul or %LOGIN +# - user name sent by an external ACL, like %EXT_USER +# - SSL client name, like %us in logformat +# - ident user name, like %ui in logformat +# %EXT_USER Username from previous external acl +# %EXT_LOG Log details from previous external acl +# %EXT_TAG Tag from previous external acl +# %IDENT Ident user name +# %SRC Client IP +# %SRCPORT Client source port +# %URI Requested URI +# %DST Requested host +# %PROTO Requested URL scheme +# %PORT Requested port +# %PATH Requested URL path +# %METHOD Request method +# %MYADDR Squid interface address +# %MYPORT Squid http_port number +# %PATH Requested URL-path (including query-string if any) +# %USER_CERT SSL User certificate in PEM format +# %USER_CERTCHAIN SSL User certificate chain in PEM format +# %USER_CERT_xx SSL User certificate subject attribute xx +# %USER_CA_CERT_xx SSL User certificate issuer attribute xx +# %ssl::>sni SSL client SNI sent to Squid +# %ssl::{Header} HTTP request header "Header" +# %>{Hdr:member} +# HTTP request header "Hdr" list member "member" +# %>{Hdr:;member} +# HTTP request header list member using ; as +# list separator. ; can be any non-alphanumeric +# character. +# +# %<{Header} HTTP reply header "Header" +# %<{Hdr:member} +# HTTP reply header "Hdr" list member "member" +# %<{Hdr:;member} +# HTTP reply header list member using ; as +# list separator. ; can be any non-alphanumeric +# character. +# +# %ACL The name of the ACL being tested. +# %DATA The ACL arguments. If not used then any arguments +# is automatically added at the end of the line +# sent to the helper. +# NOTE: this will encode the arguments as one token, +# whereas the default will pass each separately. +# +# %% The percent sign. Useful for helpers which need +# an unchanging input format. +# +# +# General request syntax: +# +# [channel-ID] FORMAT-values [acl-values ...] +# +# +# FORMAT-values consists of transaction details expanded with +# whitespace separation per the config file FORMAT specification +# using the FORMAT macros listed above. +# +# acl-values consists of any string specified in the referencing +# config 'acl ... external' line. see the "acl external" directive. +# +# Request values sent to the helper are URL escaped to protect +# each value in requests against whitespaces. +# +# If using protocol=2.5 then the request sent to the helper is not +# URL escaped to protect against whitespace. +# +# NOTE: protocol=3.0 is deprecated as no longer necessary. +# +# When using the concurrency= option the protocol is changed by +# introducing a query channel tag in front of the request/response. +# The query channel tag is a number between 0 and concurrency-1. +# This value must be echoed back unchanged to Squid as the first part +# of the response relating to its request. 
+# +# +# The helper receives lines expanded per the above format specification +# and for each input line returns 1 line starting with OK/ERR/BH result +# code and optionally followed by additional keywords with more details. +# +# +# General result syntax: +# +# [channel-ID] result keyword=value ... +# +# Result consists of one of the codes: +# +# OK +# the ACL test produced a match. +# +# ERR +# the ACL test does not produce a match. +# +# BH +# An internal error occurred in the helper, preventing +# a result being identified. +# +# The meaning of 'a match' is determined by your squid.conf +# access control configuration. See the Squid wiki for details. +# +# Defined keywords: +# +# user= The users name (login) +# +# password= The users password (for login= cache_peer option) +# +# message= Message describing the reason for this response. +# Available as %o in error pages. +# Useful on (ERR and BH results). +# +# tag= Apply a tag to a request. Only sets a tag once, +# does not alter existing tags. +# +# log= String to be logged in access.log. Available as +# %ea in logformat specifications. +# +# clt_conn_tag= Associates a TAG with the client TCP connection. +# Please see url_rewrite_program related documentation +# for this kv-pair. +# +# Any keywords may be sent on any response whether OK, ERR or BH. +# +# All response keyword values need to be a single token with URL +# escaping, or enclosed in double quotes (") and escaped using \ on +# any double quotes or \ characters within the value. The wrapping +# double quotes are removed before the value is interpreted by Squid. +# \r and \n are also replace by CR and LF. +# +# Some example key values: +# +# user=John%20Smith +# user="John Smith" +# user="J. \"Bob\" Smith" +#Default: +# none + +# TAG: acl +# Defining an Access List +# +# Every access list definition must begin with an aclname and acltype, +# followed by either type-specific arguments or a quoted filename that +# they are read from. +# +# acl aclname acltype argument ... +# acl aclname acltype "file" ... +# +# When using "file", the file should contain one item per line. +# +# Some acl types supports options which changes their default behaviour. +# The available options are: +# +# -i,+i By default, regular expressions are CASE-SENSITIVE. To make them +# case-insensitive, use the -i option. To return case-sensitive +# use the +i option between patterns, or make a new ACL line +# without -i. +# +# -n Disable lookups and address type conversions. If lookup or +# conversion is required because the parameter type (IP or +# domain name) does not match the message address type (domain +# name or IP), then the ACL would immediately declare a mismatch +# without any warnings or lookups. +# +# -- Used to stop processing all options, in the case the first acl +# value has '-' character as first character (for example the '-' +# is a valid domain name) +# +# Some acl types require suspending the current request in order +# to access some external data source. +# Those which do are marked with the tag [slow], those which +# don't are marked as [fast]. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl +# for further information +# +# ***** ACL TYPES AVAILABLE ***** +# +# acl aclname src ip-address/mask ... # clients IP address [fast] +# acl aclname src addr1-addr2/mask ... # range of addresses [fast] +# acl aclname dst [-n] ip-address/mask ... # URL host's IP address [slow] +# acl aclname localip ip-address/mask ... 
# IP address the client connected to [fast] +# +# acl aclname arp mac-address ... (xx:xx:xx:xx:xx:xx notation) +# # [fast] +# # The 'arp' ACL code is not portable to all operating systems. +# # It works on Linux, Solaris, Windows, FreeBSD, and some other +# # BSD variants. +# # +# # NOTE: Squid can only determine the MAC/EUI address for IPv4 +# # clients that are on the same subnet. If the client is on a +# # different subnet, then Squid cannot find out its address. +# # +# # NOTE 2: IPv6 protocol does not contain ARP. MAC/EUI is either +# # encoded directly in the IPv6 address or not available. +# +# acl aclname srcdomain .foo.com ... +# # reverse lookup, from client IP [slow] +# acl aclname dstdomain [-n] .foo.com ... +# # Destination server from URL [fast] +# acl aclname srcdom_regex [-i] \.foo\.com ... +# # regex matching client name [slow] +# acl aclname dstdom_regex [-n] [-i] \.foo\.com ... +# # regex matching server [fast] +# # +# # For dstdomain and dstdom_regex a reverse lookup is tried if a IP +# # based URL is used and no match is found. The name "none" is used +# # if the reverse lookup fails. +# +# acl aclname src_as number ... +# acl aclname dst_as number ... +# # [fast] +# # Except for access control, AS numbers can be used for +# # routing of requests to specific caches. Here's an +# # example for routing all requests for AS#1241 and only +# # those to mycache.mydomain.net: +# # acl asexample dst_as 1241 +# # cache_peer_access mycache.mydomain.net allow asexample +# # cache_peer_access mycache_mydomain.net deny all +# +# acl aclname peername myPeer ... +# # [fast] +# # match against a named cache_peer entry +# # set unique name= on cache_peer lines for reliable use. +# +# acl aclname time [day-abbrevs] [h1:m1-h2:m2] +# # [fast] +# # day-abbrevs: +# # S - Sunday +# # M - Monday +# # T - Tuesday +# # W - Wednesday +# # H - Thursday +# # F - Friday +# # A - Saturday +# # h1:m1 must be less than h2:m2 +# +# acl aclname url_regex [-i] ^http:// ... +# # regex matching on whole URL [fast] +# acl aclname urllogin [-i] [^a-zA-Z0-9] ... +# # regex matching on URL login field +# acl aclname urlpath_regex [-i] \.gif$ ... +# # regex matching on URL path [fast] +# +# acl aclname port 80 70 21 0-1024... # destination TCP port [fast] +# # ranges are alloed +# acl aclname localport 3128 ... # TCP port the client connected to [fast] +# # NP: for interception mode this is usually '80' +# +# acl aclname myportname 3128 ... # *_port name [fast] +# +# acl aclname proto HTTP FTP ... # request protocol [fast] +# +# acl aclname method GET POST ... # HTTP request method [fast] +# +# acl aclname http_status 200 301 500- 400-403 ... +# # status code in reply [fast] +# +# acl aclname browser [-i] regexp ... +# # pattern match on User-Agent header (see also req_header below) [fast] +# +# acl aclname referer_regex [-i] regexp ... +# # pattern match on Referer header [fast] +# # Referer is highly unreliable, so use with care +# +# acl aclname ident username ... +# acl aclname ident_regex [-i] pattern ... +# # string match on ident output [slow] +# # use REQUIRED to accept any non-null ident. +# +# acl aclname proxy_auth [-i] username ... +# acl aclname proxy_auth_regex [-i] pattern ... +# # perform http authentication challenge to the client and match against +# # supplied credentials [slow] +# # +# # takes a list of allowed usernames. +# # use REQUIRED to accept any valid username. 
+# # +# # Will use proxy authentication in forward-proxy scenarios, and plain +# # http authenticaiton in reverse-proxy scenarios +# # +# # NOTE: when a Proxy-Authentication header is sent but it is not +# # needed during ACL checking the username is NOT logged +# # in access.log. +# # +# # NOTE: proxy_auth requires a EXTERNAL authentication program +# # to check username/password combinations (see +# # auth_param directive). +# # +# # NOTE: proxy_auth can't be used in a transparent/intercepting proxy +# # as the browser needs to be configured for using a proxy in order +# # to respond to proxy authentication. +# +# acl aclname snmp_community string ... +# # A community string to limit access to your SNMP Agent [fast] +# # Example: +# # +# # acl snmppublic snmp_community public +# +# acl aclname maxconn number +# # This will be matched when the client's IP address has +# # more than TCP connections established. [fast] +# # NOTE: This only measures direct TCP links so X-Forwarded-For +# # indirect clients are not counted. +# +# acl aclname max_user_ip [-s] number +# # This will be matched when the user attempts to log in from more +# # than different ip addresses. The authenticate_ip_ttl +# # parameter controls the timeout on the ip entries. [fast] +# # If -s is specified the limit is strict, denying browsing +# # from any further IP addresses until the ttl has expired. Without +# # -s Squid will just annoy the user by "randomly" denying requests. +# # (the counter is reset each time the limit is reached and a +# # request is denied) +# # NOTE: in acceleration mode or where there is mesh of child proxies, +# # clients may appear to come from multiple addresses if they are +# # going through proxy farms, so a limit of 1 may cause user problems. +# +# acl aclname random probability +# # Pseudo-randomly match requests. Based on the probability given. +# # Probability may be written as a decimal (0.333), fraction (1/3) +# # or ratio of matches:non-matches (3:5). +# +# acl aclname req_mime_type [-i] mime-type ... +# # regex match against the mime type of the request generated +# # by the client. Can be used to detect file upload or some +# # types HTTP tunneling requests [fast] +# # NOTE: This does NOT match the reply. You cannot use this +# # to match the returned file type. +# +# acl aclname req_header header-name [-i] any\.regex\.here +# # regex match against any of the known request headers. May be +# # thought of as a superset of "browser", "referer" and "mime-type" +# # ACL [fast] +# +# acl aclname rep_mime_type [-i] mime-type ... +# # regex match against the mime type of the reply received by +# # squid. Can be used to detect file download or some +# # types HTTP tunneling requests. [fast] +# # NOTE: This has no effect in http_access rules. It only has +# # effect in rules that affect the reply data stream such as +# # http_reply_access. +# +# acl aclname rep_header header-name [-i] any\.regex\.here +# # regex match against any of the known reply headers. May be +# # thought of as a superset of "browser", "referer" and "mime-type" +# # ACLs [fast] +# +# acl aclname external class_name [arguments...] +# # external ACL lookup via a helper class defined by the +# # external_acl_type directive [slow] +# +# acl aclname user_cert attribute values... +# # match against attributes in a user SSL certificate +# # attribute is one of DN/C/O/CN/L/ST or a numerical OID [fast] +# +# acl aclname ca_cert attribute values... 
+# # match against attributes a users issuing CA SSL certificate +# # attribute is one of DN/C/O/CN/L/ST or a numerical OID [fast] +# +# acl aclname ext_user username ... +# acl aclname ext_user_regex [-i] pattern ... +# # string match on username returned by external acl helper [slow] +# # use REQUIRED to accept any non-null user name. +# +# acl aclname tag tagvalue ... +# # string match on tag returned by external acl helper [fast] +# # DEPRECATED. Only the first tag will match with this ACL. +# # Use the 'note' ACL instead for handling multiple tag values. +# +# acl aclname hier_code codename ... +# # string match against squid hierarchy code(s); [fast] +# # e.g., DIRECT, PARENT_HIT, NONE, etc. +# # +# # NOTE: This has no effect in http_access rules. It only has +# # effect in rules that affect the reply data stream such as +# # http_reply_access. +# +# acl aclname note name [value ...] +# # match transaction annotation [fast] +# # Without values, matches any annotation with a given name. +# # With value(s), matches any annotation with a given name that +# # also has one of the given values. +# # Names and values are compared using a string equality test. +# # Annotation sources include note and adaptation_meta directives +# # as well as helper and eCAP responses. +# +# acl aclname adaptation_service service ... +# # Matches the name of any icap_service, ecap_service, +# # adaptation_service_set, or adaptation_service_chain that Squid +# # has used (or attempted to use) for the master transaction. +# # This ACL must be defined after the corresponding adaptation +# # service is named in squid.conf. This ACL is usable with +# # adaptation_meta because it starts matching immediately after +# # the service has been selected for adaptation. +# +# acl aclname any-of acl1 acl2 ... +# # match any one of the acls [fast or slow] +# # The first matching ACL stops further ACL evaluation. +# # +# # ACLs from multiple any-of lines with the same name are ORed. +# # For example, A = (a1 or a2) or (a3 or a4) can be written as +# # acl A any-of a1 a2 +# # acl A any-of a3 a4 +# # +# # This group ACL is fast if all evaluated ACLs in the group are fast +# # and slow otherwise. +# +# acl aclname all-of acl1 acl2 ... +# # match all of the acls [fast or slow] +# # The first mismatching ACL stops further ACL evaluation. +# # +# # ACLs from multiple all-of lines with the same name are ORed. +# # For example, B = (b1 and b2) or (b3 and b4) can be written as +# # acl B all-of b1 b2 +# # acl B all-of b3 b4 +# # +# # This group ACL is fast if all evaluated ACLs in the group are fast +# # and slow otherwise. +# +# Examples: +# acl macaddress arp 09:00:2b:23:45:67 +# acl myexample dst_as 1241 +# acl password proxy_auth REQUIRED +# acl fileupload req_mime_type -i ^multipart/form-data$ +# acl javascript rep_mime_type -i ^application/x-javascript$ +# +#Default: +# ACLs all, manager, localhost, and to_localhost are predefined. +# +# +# Recommended minimum configuration: +# + +# Example rule allowing access from your local networks. 
+# Adapt to list your (internal) IP networks from where browsing +# should be allowed +acl localnet src 10.0.0.0/8 # RFC1918 possible internal network +acl localnet src 172.16.0.0/12 # RFC1918 possible internal network +acl localnet src 192.168.0.0/16 # RFC1918 possible internal network +acl localnet src fc00::/7 # RFC 4193 local private network range +acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines + +acl SSL_ports port 443 +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 1025-65535 # unregistered ports +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +acl CONNECT method CONNECT + +# TAG: proxy_protocol_access +# Determine which client proxies can be trusted to provide correct +# information regarding real client IP address using PROXY protocol. +# +# Requests may pass through a chain of several other proxies +# before reaching us. The original source details may by sent in: +# * HTTP message Forwarded header, or +# * HTTP message X-Forwarded-For header, or +# * PROXY protocol connection header. +# +# This directive is solely for validating new PROXY protocol +# connections received from a port flagged with require-proxy-header. +# It is checked only once after TCP connection setup. +# +# A deny match results in TCP connection closure. +# +# An allow match is required for Squid to permit the corresponding +# TCP connection, before Squid even looks for HTTP request headers. +# If there is an allow match, Squid starts using PROXY header information +# to determine the source address of the connection for all future ACL +# checks, logging, etc. +# +# SECURITY CONSIDERATIONS: +# +# Any host from which we accept client IP details can place +# incorrect information in the relevant header, and Squid +# will use the incorrect information as if it were the +# source address of the request. This may enable remote +# hosts to bypass any access control restrictions that are +# based on the client's source addresses. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# all TCP connections to ports with require-proxy-header will be denied + +# TAG: follow_x_forwarded_for +# Determine which client proxies can be trusted to provide correct +# information regarding real client IP address. +# +# Requests may pass through a chain of several other proxies +# before reaching us. The original source details may by sent in: +# * HTTP message Forwarded header, or +# * HTTP message X-Forwarded-For header, or +# * PROXY protocol connection header. +# +# PROXY protocol connections are controlled by the proxy_protocol_access +# directive which is checked before this. +# +# If a request reaches us from a source that is allowed by this +# directive, then we trust the information it provides regarding +# the IP of the client it received from (if any). +# +# For the purpose of ACLs used in this directive the src ACL type always +# matches the address we are testing and srcdomain matches its rDNS. +# +# On each HTTP request Squid checks for X-Forwarded-For header fields. +# If found the header values are iterated in reverse order and an allow +# match is required for Squid to continue on to the next value. 
+# The verification ends when a value receives a deny match, cannot be +# tested, or there are no more values to test. +# NOTE: Squid does not yet follow the Forwarded HTTP header. +# +# The end result of this process is an IP address that we will +# refer to as the indirect client address. This address may +# be treated as the client address for access control, ICAP, delay +# pools and logging, depending on the acl_uses_indirect_client, +# icap_uses_indirect_client, delay_pool_uses_indirect_client, +# log_uses_indirect_client and tproxy_uses_indirect_client options. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +# SECURITY CONSIDERATIONS: +# +# Any host from which we accept client IP details can place +# incorrect information in the relevant header, and Squid +# will use the incorrect information as if it were the +# source address of the request. This may enable remote +# hosts to bypass any access control restrictions that are +# based on the client's source addresses. +# +# For example: +# +# acl localhost src 127.0.0.1 +# acl my_other_proxy srcdomain .proxy.example.com +# follow_x_forwarded_for allow localhost +# follow_x_forwarded_for allow my_other_proxy +#Default: +# X-Forwarded-For header will be ignored. + +# TAG: acl_uses_indirect_client on|off +# Controls whether the indirect client address +# (see follow_x_forwarded_for) is used instead of the +# direct client address in acl matching. +# +# NOTE: maxconn ACL considers direct TCP links and indirect +# clients will always have zero. So no match. +#Default: +# acl_uses_indirect_client on + +# TAG: delay_pool_uses_indirect_client on|off +# Controls whether the indirect client address +# (see follow_x_forwarded_for) is used instead of the +# direct client address in delay pools. +#Default: +# delay_pool_uses_indirect_client on + +# TAG: log_uses_indirect_client on|off +# Controls whether the indirect client address +# (see follow_x_forwarded_for) is used instead of the +# direct client address in the access log. +#Default: +# log_uses_indirect_client on + +# TAG: tproxy_uses_indirect_client on|off +# Controls whether the indirect client address +# (see follow_x_forwarded_for) is used instead of the +# direct client address when spoofing the outgoing client. +# +# This has no effect on requests arriving in non-tproxy +# mode ports. +# +# SECURITY WARNING: Usage of this option is dangerous +# and should not be used trivially. Correct configuration +# of follow_x_forwarded_for with a limited set of trusted +# sources is required to prevent abuse of your proxy. +#Default: +# tproxy_uses_indirect_client off + +# TAG: spoof_client_ip +# Control client IP address spoofing of TPROXY traffic based on +# defined access lists. +# +# spoof_client_ip allow|deny [!]aclname ... +# +# If there are no "spoof_client_ip" lines present, the default +# is to "allow" spoofing of any suitable request. +# +# Note that the cache_peer "no-tproxy" option overrides this ACL. +# +# This clause supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# Allow spoofing on all TPROXY traffic. + +# TAG: http_access +# Allowing or Denying access based on defined access lists +# +# To allow or deny a message received on an HTTP, HTTPS, or FTP port: +# http_access allow|deny [!]aclname ... +# +# NOTE on default values: +# +# If there are no "access" lines present, the default is to deny +# the request. 
+# +# If none of the "access" lines cause a match, the default is the +# opposite of the last line in the list. If the last line was +# deny, the default is allow. Conversely, if the last line +# is allow, the default will be deny. For these reasons, it is a +# good idea to have an "deny all" entry at the end of your access +# lists to avoid potential confusion. +# +# This clause supports both fast and slow acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +#Default: +# Deny, unless rules exist in squid.conf. +# + +# +# Recommended minimum Access Permission configuration: +# +# Deny requests to certain unsafe ports +http_access deny !Safe_ports + +# Deny CONNECT to other than secure SSL ports +http_access deny CONNECT !SSL_ports + +# Only allow cachemgr access from localhost +http_access allow localhost manager +http_access deny manager + +# We strongly recommend the following be uncommented to protect innocent +# web applications running on the proxy server who think the only +# one who can access services on "localhost" is a local user +#http_access deny to_localhost + +# +# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS +# + +# Example rule allowing access from your local networks. +# Adapt localnet in the ACL section to list your (internal) IP networks +# from where browsing should be allowed +http_access allow localnet +http_access allow localhost + +# And finally deny all other access to this proxy +http_access deny all + +# TAG: adapted_http_access +# Allowing or Denying access based on defined access lists +# +# Essentially identical to http_access, but runs after redirectors +# and ICAP/eCAP adaptation. Allowing access control based on their +# output. +# +# If not set then only http_access is used. +#Default: +# Allow, unless rules exist in squid.conf. + +# TAG: http_reply_access +# Allow replies to client requests. This is complementary to http_access. +# +# http_reply_access allow|deny [!] aclname ... +# +# NOTE: if there are no access lines present, the default is to allow +# all replies. +# +# If none of the access lines cause a match the opposite of the +# last line will apply. Thus it is good practice to end the rules +# with an "allow all" or "deny all" entry. +# +# This clause supports both fast and slow acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# Allow, unless rules exist in squid.conf. + +# TAG: icp_access +# Allowing or Denying access to the ICP port based on defined +# access lists +# +# icp_access allow|deny [!]aclname ... +# +# NOTE: The default if no icp_access lines are present is to +# deny all traffic. This default may cause problems with peers +# using ICP. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +## Allow ICP queries from local networks only +##icp_access allow localnet +##icp_access deny all +#Default: +# Deny, unless rules exist in squid.conf. + +# TAG: htcp_access +# Allowing or Denying access to the HTCP port based on defined +# access lists +# +# htcp_access allow|deny [!]aclname ... +# +# See also htcp_clr_access for details on access control for +# cache purge (CLR) HTCP messages. +# +# NOTE: The default if no htcp_access lines are present is to +# deny all traffic. This default may cause problems with peers +# using the htcp option. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. 
+# +## Allow HTCP queries from local networks only +##htcp_access allow localnet +##htcp_access deny all +#Default: +# Deny, unless rules exist in squid.conf. + +# TAG: htcp_clr_access +# Allowing or Denying access to purge content using HTCP based +# on defined access lists. +# See htcp_access for details on general HTCP access control. +# +# htcp_clr_access allow|deny [!]aclname ... +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +## Allow HTCP CLR requests from trusted peers +#acl htcp_clr_peer src 192.0.2.2 2001:DB8::2 +#htcp_clr_access allow htcp_clr_peer +#htcp_clr_access deny all +#Default: +# Deny, unless rules exist in squid.conf. + +# TAG: miss_access +# Determines whether network access is permitted when satisfying a request. +# +# For example; +# to force your neighbors to use you as a sibling instead of +# a parent. +# +# acl localclients src 192.0.2.0/24 2001:DB8::a:0/64 +# miss_access deny !localclients +# miss_access allow all +# +# This means only your local clients are allowed to fetch relayed/MISS +# replies from the network and all other clients can only fetch cached +# objects (HITs). +# +# The default for this setting allows all clients who passed the +# http_access rules to relay via this proxy. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# Allow, unless rules exist in squid.conf. + +# TAG: ident_lookup_access +# A list of ACL elements which, if matched, cause an ident +# (RFC 931) lookup to be performed for this request. For +# example, you might choose to always perform ident lookups +# for your main multi-user Unix boxes, but not for your Macs +# and PCs. By default, ident lookups are not performed for +# any requests. +# +# To enable ident lookups for specific client addresses, you +# can follow this example: +# +# acl ident_aware_hosts src 198.168.1.0/24 +# ident_lookup_access allow ident_aware_hosts +# ident_lookup_access deny all +# +# Only src type ACL checks are fully supported. A srcdomain +# ACL might work at times, but it will not always provide +# the correct result. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# Unless rules exist in squid.conf, IDENT is not fetched. + +# TAG: reply_body_max_size size [acl acl...] +# This option specifies the maximum size of a reply body. It can be +# used to prevent users from downloading very large files, such as +# MP3's and movies. When the reply headers are received, the +# reply_body_max_size lines are processed, and the first line where +# all (if any) listed ACLs are true is used as the maximum body size +# for this reply. +# +# This size is checked twice. First when we get the reply headers, +# we check the content-length value. If the content length value exists +# and is larger than the allowed size, the request is denied and the +# user receives an error message that says "the request or reply +# is too large." If there is no content-length, and the reply +# size exceeds this limit, the client's connection is just closed +# and they will receive a partial reply. +# +# WARNING: downstream caches probably can not detect a partial reply +# if there is no content-length header, so they will cache +# partial responses and give them out as hits. You should NOT +# use this option if you have downstream caches. 
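+#
+# If you do use it, an access-list scoped limit might look like the
+# following (the "bulkdownload" ACL name and the 192.0.2.0/24 network
+# are only placeholders):
+#
+# acl bulkdownload src 192.0.2.0/24
+# reply_body_max_size 200 MB bulkdownload
+# reply_body_max_size none
+#
+# The first line whose ACLs all match is used, so the trailing "none"
+# line leaves all other clients without a limit.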
+# +# WARNING: A maximum size smaller than the size of squid's error messages +# will cause an infinite loop and crash squid. Ensure that the smallest +# non-zero value you use is greater that the maximum header size plus +# the size of your largest error page. +# +# If you set this parameter none (the default), there will be +# no limit imposed. +# +# Configuration Format is: +# reply_body_max_size SIZE UNITS [acl ...] +# ie. +# reply_body_max_size 10 MB +# +#Default: +# No limit is applied. + +# NETWORK OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: http_port +# Usage: port [mode] [options] +# hostname:port [mode] [options] +# 1.2.3.4:port [mode] [options] +# +# The socket addresses where Squid will listen for HTTP client +# requests. You may specify multiple socket addresses. +# There are three forms: port alone, hostname with port, and +# IP address with port. If you specify a hostname or IP +# address, Squid binds the socket to that specific +# address. Most likely, you do not need to bind to a specific +# address, so you can use the port number alone. +# +# If you are running Squid in accelerator mode, you +# probably want to listen on port 80 also, or instead. +# +# The -a command line option may be used to specify additional +# port(s) where Squid listens for proxy request. Such ports will +# be plain proxy ports with no options. +# +# You may specify multiple socket addresses on multiple lines. +# +# Modes: +# +# intercept Support for IP-Layer NAT interception delivering +# traffic to this Squid port. +# NP: disables authentication on the port. +# +# tproxy Support Linux TPROXY (or BSD divert-to) with spoofing +# of outgoing connections using the client IP address. +# NP: disables authentication on the port. +# +# accel Accelerator / reverse proxy mode +# +# ssl-bump For each CONNECT request allowed by ssl_bump ACLs, +# establish secure connection with the client and with +# the server, decrypt HTTPS messages as they pass through +# Squid, and treat them as unencrypted HTTP messages, +# becoming the man-in-the-middle. +# +# The ssl_bump option is required to fully enable +# bumping of CONNECT requests. +# +# Omitting the mode flag causes default forward proxy mode to be used. +# +# +# Accelerator Mode Options: +# +# defaultsite=domainname +# What to use for the Host: header if it is not present +# in a request. Determines what site (not origin server) +# accelerators should consider the default. +# +# no-vhost Disable using HTTP/1.1 Host header for virtual domain support. +# +# protocol= Protocol to reconstruct accelerated and intercepted +# requests with. Defaults to HTTP/1.1 for http_port and +# HTTPS/1.1 for https_port. +# When an unsupported value is configured Squid will +# produce a FATAL error. +# Values: HTTP or HTTP/1.1, HTTPS or HTTPS/1.1 +# +# vport Virtual host port support. Using the http_port number +# instead of the port passed on Host: headers. +# +# vport=NN Virtual host port support. Using the specified port +# number instead of the port passed on Host: headers. +# +# act-as-origin +# Act as if this Squid is the origin server. +# This currently means generate new Date: and Expires: +# headers on HIT instead of adding Age:. +# +# ignore-cc Ignore request Cache-Control headers. +# +# WARNING: This option violates HTTP specifications if +# used in non-accelerator setups. +# +# allow-direct Allow direct forwarding in accelerator mode. 
 Normally
+# accelerated requests are denied direct forwarding as if
+# never_direct was used.
+#
+# WARNING: this option opens accelerator mode to security
+# vulnerabilities usually only affecting interception
+# mode. Make sure to protect forwarding with suitable
+# http_access rules when using this.
+#
+#
+# SSL Bump Mode Options:
+# In addition to these options ssl-bump requires TLS/SSL options.
+#
+# generate-host-certificates[=<on|off>]
+# Dynamically create SSL server certificates for the
+# destination hosts of bumped CONNECT requests. When
+# enabled, the cert and key options are used to sign
+# generated certificates. Otherwise the generated
+# certificate will be self-signed.
+# If there is a CA certificate, the lifetime of the generated
+# certificate equals the lifetime of the CA certificate. If
+# the generated certificate is self-signed, its lifetime is
+# three years.
+# This option is disabled by default. See the ssl-bump
+# option above for more information.
+#
+# dynamic_cert_mem_cache_size=SIZE
+# Approximate total RAM size spent on cached generated
+# certificates. If set to zero, caching is disabled.
+#
+# TLS / SSL Options:
+#
+# cert= Path to SSL certificate (PEM format).
+#
+# key= Path to SSL private key file (PEM format)
+# if not specified, the certificate file is
+# assumed to be a combined certificate and
+# key file.
+#
+# version= The version of SSL/TLS supported
+# 1 automatic (default)
+# 2 SSLv2 only
+# 3 SSLv3 only
+# 4 TLSv1.0 only
+# 5 TLSv1.1 only
+# 6 TLSv1.2 only
+#
+# cipher= Colon separated list of supported ciphers.
+# NOTE: some ciphers such as EDH ciphers depend on
+# additional settings. If those settings are
+# omitted the ciphers may be silently ignored
+# by the OpenSSL library.
+#
+# options= Various SSL implementation options. The most important
+# being:
+# NO_SSLv2 Disallow the use of SSLv2
+# NO_SSLv3 Disallow the use of SSLv3
+# NO_TLSv1 Disallow the use of TLSv1.0
+# NO_TLSv1_1 Disallow the use of TLSv1.1
+# NO_TLSv1_2 Disallow the use of TLSv1.2
+# SINGLE_DH_USE Always create a new key when using
+# temporary/ephemeral DH key exchanges
+# NO_TICKET Disables TLS tickets extension
+#
+# SINGLE_ECDH_USE
+# Enable ephemeral ECDH key exchange.
+# The adopted curve should be specified
+# using the tls-dh option.
+#
+# ALL Enable various bug workarounds
+# suggested as "harmless" by OpenSSL.
+# Be warned that this reduces SSL/TLS
+# strength against some attacks.
+# See OpenSSL SSL_CTX_set_options documentation for a
+# complete list of options.
+#
+# clientca= File containing the list of CAs to use when
+# requesting a client certificate.
+#
+# cafile= File containing additional CA certificates to
+# use when verifying client certificates. If unset
+# clientca will be used.
+#
+# capath= Directory containing additional CA certificates
+# and CRL lists to use when verifying client certificates.
+#
+# crlfile= File of additional CRL lists to use when verifying
+# the client certificate, in addition to CRLs stored in
+# the capath. Implies VERIFY_CRL flag below.
+#
+# tls-dh=[curve:]file
+# File containing DH parameters for temporary/ephemeral DH key
+# exchanges, optionally prefixed by a curve for ephemeral ECDH
+# key exchanges.
+# See OpenSSL documentation for details on how to create the
+# DH parameter file. Supported curves for ECDH can be listed
+# using the "openssl ecparam -list_curves" command.
+# WARNING: EDH and EECDH ciphers will be silently disabled if
+# this option is not set.
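+#
+# As a rough illustration only (the paths and port number below are
+# placeholders, not defaults): the parameter file could be created
+# with the OpenSSL command line, e.g.
+#
+# openssl dhparam -out /etc/squid/dhparams.pem 2048
+#
+# (a shell command, not squid.conf syntax), and then referenced from
+# an ssl-bump listening port:
+#
+# http_port 3129 ssl-bump cert=/etc/squid/bump.pem tls-dh=prime256v1:/etc/squid/dhparams.pem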
+# +# sslflags= Various flags modifying the use of SSL: +# DELAYED_AUTH +# Don't request client certificates +# immediately, but wait until acl processing +# requires a certificate (not yet implemented). +# NO_DEFAULT_CA +# Don't use the default CA lists built in +# to OpenSSL. +# NO_SESSION_REUSE +# Don't allow for session reuse. Each connection +# will result in a new SSL session. +# VERIFY_CRL +# Verify CRL lists when accepting client +# certificates. +# VERIFY_CRL_ALL +# Verify CRL lists for all certificates in the +# client certificate chain. +# +# sslcontext= SSL session ID context identifier. +# +# Other Options: +# +# connection-auth[=on|off] +# use connection-auth=off to tell Squid to prevent +# forwarding Microsoft connection oriented authentication +# (NTLM, Negotiate and Kerberos) +# +# disable-pmtu-discovery= +# Control Path-MTU discovery usage: +# off lets OS decide on what to do (default). +# transparent disable PMTU discovery when transparent +# support is enabled. +# always disable always PMTU discovery. +# +# In many setups of transparently intercepting proxies +# Path-MTU discovery can not work on traffic towards the +# clients. This is the case when the intercepting device +# does not fully track connections and fails to forward +# ICMP must fragment messages to the cache server. If you +# have such setup and experience that certain clients +# sporadically hang or never complete requests set +# disable-pmtu-discovery option to 'transparent'. +# +# name= Specifies a internal name for the port. Defaults to +# the port specification (port or addr:port) +# +# tcpkeepalive[=idle,interval,timeout] +# Enable TCP keepalive probes of idle connections. +# In seconds; idle is the initial time before TCP starts +# probing the connection, interval how often to probe, and +# timeout the time before giving up. +# +# require-proxy-header +# Require PROXY protocol version 1 or 2 connections. +# The proxy_protocol_access is required to whitelist +# downstream proxies which can be trusted. +# +# If you run Squid on a dual-homed machine with an internal +# and an external interface we recommend you to specify the +# internal address:port in http_port. This way Squid will only be +# visible on the internal address. +# +# + +# Squid normally listens to port 3128 +http_port 3128 + +# TAG: https_port +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Usage: [ip:]port cert=certificate.pem [key=key.pem] [mode] [options...] +# +# The socket address where Squid will listen for client requests made +# over TLS or SSL connections. Commonly referred to as HTTPS. +# +# This is most useful for situations where you are running squid in +# accelerator mode and you want to do the SSL work at the accelerator level. +# +# You may specify multiple socket addresses on multiple lines, +# each with their own SSL certificate and/or options. +# +# Modes: +# +# accel Accelerator / reverse proxy mode +# +# intercept Support for IP-Layer interception of +# outgoing requests without browser settings. +# NP: disables authentication and IPv6 on the port. +# +# tproxy Support Linux TPROXY for spoofing outgoing +# connections using the client IP address. +# NP: disables authentication and maybe IPv6 on the port. 
+# +# ssl-bump For each intercepted connection allowed by ssl_bump +# ACLs, establish a secure connection with the client and with +# the server, decrypt HTTPS messages as they pass through +# Squid, and treat them as unencrypted HTTP messages, +# becoming the man-in-the-middle. +# +# An "ssl_bump server-first" match is required to +# fully enable bumping of intercepted SSL connections. +# +# Requires tproxy or intercept. +# +# Omitting the mode flag causes default forward proxy mode to be used. +# +# +# See http_port for a list of generic options +# +# +# SSL Options: +# +# cert= Path to SSL certificate (PEM format). +# +# key= Path to SSL private key file (PEM format) +# if not specified, the certificate file is +# assumed to be a combined certificate and +# key file. +# +# version= The version of SSL/TLS supported +# 1 automatic (default) +# 2 SSLv2 only +# 3 SSLv3 only +# 4 TLSv1 only +# +# cipher= Colon separated list of supported ciphers. +# +# options= Various SSL engine options. The most important +# being: +# NO_SSLv2 Disallow the use of SSLv2 +# NO_SSLv3 Disallow the use of SSLv3 +# NO_TLSv1 Disallow the use of TLSv1 +# +# SINGLE_DH_USE Always create a new key when using +# temporary/ephemeral DH key exchanges +# +# SINGLE_ECDH_USE +# Enable ephemeral ECDH key exchange. +# The adopted curve should be specified +# using the tls-dh option. +# +# See src/ssl_support.c or OpenSSL SSL_CTX_set_options +# documentation for a complete list of options. +# +# clientca= File containing the list of CAs to use when +# requesting a client certificate. +# +# cafile= File containing additional CA certificates to +# use when verifying client certificates. If unset +# clientca will be used. +# +# capath= Directory containing additional CA certificates +# and CRL lists to use when verifying client certificates. +# +# crlfile= File of additional CRL lists to use when verifying +# the client certificate, in addition to CRLs stored in +# the capath. Implies VERIFY_CRL flag below. +# +# tls-dh=[curve:]file +# File containing DH parameters for temporary/ephemeral DH key +# exchanges, optionally prefixed by a curve for ephemeral ECDH +# key exchanges. +# +# sslflags= Various flags modifying the use of SSL: +# DELAYED_AUTH +# Don't request client certificates +# immediately, but wait until acl processing +# requires a certificate (not yet implemented). +# NO_DEFAULT_CA +# Don't use the default CA lists built in +# to OpenSSL. +# NO_SESSION_REUSE +# Don't allow for session reuse. Each connection +# will result in a new SSL session. +# VERIFY_CRL +# Verify CRL lists when accepting client +# certificates. +# VERIFY_CRL_ALL +# Verify CRL lists for all certificates in the +# client certificate chain. +# +# sslcontext= SSL session ID context identifier. +# +# generate-host-certificates[=] +# Dynamically create SSL server certificates for the +# destination hosts of bumped SSL requests.When +# enabled, the cert and key options are used to sign +# generated certificates. Otherwise generated +# certificate will be selfsigned. +# If there is CA certificate life time of generated +# certificate equals lifetime of CA certificate. If +# generated certificate is selfsigned lifetime is three +# years. +# This option is disabled by default. See the ssl-bump +# option above for more information. +# +# dynamic_cert_mem_cache_size=SIZE +# Approximate total RAM size spent on cached generated +# certificates. If set to zero, caching is disabled. +# +# See http_port for a list of available options. 
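+#
+## Example: reverse proxy terminating TLS for a single site. The
+## certificate paths and host name are placeholders for your own values.
+##https_port 443 accel cert=/etc/squid/example.org.pem key=/etc/squid/example.org.key defaultsite=example.org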
+#Default: +# none + +# TAG: ftp_port +# Enables Native FTP proxy by specifying the socket address where Squid +# listens for FTP client requests. See http_port directive for various +# ways to specify the listening address and mode. +# +# Usage: ftp_port address [mode] [options] +# +# WARNING: This is a new, experimental, complex feature that has seen +# limited production exposure. Some Squid modules (e.g., caching) do not +# currently work with native FTP proxying, and many features have not +# even been tested for compatibility. Test well before deploying! +# +# Native FTP proxying differs substantially from proxying HTTP requests +# with ftp:// URIs because Squid works as an FTP server and receives +# actual FTP commands (rather than HTTP requests with FTP URLs). +# +# Native FTP commands accepted at ftp_port are internally converted or +# wrapped into HTTP-like messages. The same happens to Native FTP +# responses received from FTP origin servers. Those HTTP-like messages +# are shoveled through regular access control and adaptation layers +# between the FTP client and the FTP origin server. This allows Squid to +# examine, adapt, block, and log FTP exchanges. Squid reuses most HTTP +# mechanisms when shoveling wrapped FTP messages. For example, +# http_access and adaptation_access directives are used. +# +# Modes: +# +# intercept Same as http_port intercept. The FTP origin address is +# determined based on the intended destination of the +# intercepted connection. +# +# tproxy Support Linux TPROXY for spoofing outgoing +# connections using the client IP address. +# NP: disables authentication and maybe IPv6 on the port. +# +# By default (i.e., without an explicit mode option), Squid extracts the +# FTP origin address from the login@origin parameter of the FTP USER +# command. Many popular FTP clients support such native FTP proxying. +# +# Options: +# +# name=token Specifies an internal name for the port. Defaults to +# the port address. Usable with myportname ACL. +# +# ftp-track-dirs +# Enables tracking of FTP directories by injecting extra +# PWD commands and adjusting Request-URI (in wrapping +# HTTP requests) to reflect the current FTP server +# directory. Tracking is disabled by default. +# +# protocol=FTP Protocol to reconstruct accelerated and intercepted +# requests with. Defaults to FTP. No other accepted +# values have been tested with. An unsupported value +# results in a FATAL error. Accepted values are FTP, +# HTTP (or HTTP/1.1), and HTTPS (or HTTPS/1.1). +# +# Other http_port modes and options that are not specific to HTTP and +# HTTPS may also work. +#Default: +# none + +# TAG: tcp_outgoing_tos +# Allows you to select a TOS/Diffserv value for packets outgoing +# on the server side, based on an ACL. +# +# tcp_outgoing_tos ds-field [!]aclname ... +# +# Example where normal_service_net uses the TOS value 0x00 +# and good_service_net uses 0x20 +# +# acl normal_service_net src 10.0.0.0/24 +# acl good_service_net src 10.0.1.0/24 +# tcp_outgoing_tos 0x00 normal_service_net +# tcp_outgoing_tos 0x20 good_service_net +# +# TOS/DSCP values really only have local significance - so you should +# know what you're specifying. For more information, see RFC2474, +# RFC2475, and RFC3260. +# +# The TOS/DSCP byte must be exactly that - a octet value 0 - 255, or +# "default" to use whatever default your host has. +# Note that only multiples of 4 are usable as the two rightmost bits have +# been redefined for use by ECN (RFC 3168 section 23.1). 
+# The squid parser will enforce this by masking away the ECN bits. +# +# Processing proceeds in the order specified, and stops at first fully +# matching line. +# +# Only fast ACLs are supported. +#Default: +# none + +# TAG: clientside_tos +# Allows you to select a TOS/DSCP value for packets being transmitted +# on the client-side, based on an ACL. +# +# clientside_tos ds-field [!]aclname ... +# +# Example where normal_service_net uses the TOS value 0x00 +# and good_service_net uses 0x20 +# +# acl normal_service_net src 10.0.0.0/24 +# acl good_service_net src 10.0.1.0/24 +# clientside_tos 0x00 normal_service_net +# clientside_tos 0x20 good_service_net +# +# Note: This feature is incompatible with qos_flows. Any TOS values set here +# will be overwritten by TOS values in qos_flows. +# +# The TOS/DSCP byte must be exactly that - a octet value 0 - 255, or +# "default" to use whatever default your host has. +# Note that only multiples of 4 are usable as the two rightmost bits have +# been redefined for use by ECN (RFC 3168 section 23.1). +# The squid parser will enforce this by masking away the ECN bits. +# +#Default: +# none + +# TAG: tcp_outgoing_mark +# Note: This option is only available if Squid is rebuilt with the +# Packet MARK (Linux) +# +# Allows you to apply a Netfilter mark value to outgoing packets +# on the server side, based on an ACL. +# +# tcp_outgoing_mark mark-value [!]aclname ... +# +# Example where normal_service_net uses the mark value 0x00 +# and good_service_net uses 0x20 +# +# acl normal_service_net src 10.0.0.0/24 +# acl good_service_net src 10.0.1.0/24 +# tcp_outgoing_mark 0x00 normal_service_net +# tcp_outgoing_mark 0x20 good_service_net +# +# Only fast ACLs are supported. +#Default: +# none + +# TAG: clientside_mark +# Note: This option is only available if Squid is rebuilt with the +# Packet MARK (Linux) +# +# Allows you to apply a Netfilter mark value to packets being transmitted +# on the client-side, based on an ACL. +# +# clientside_mark mark-value [!]aclname ... +# +# Example where normal_service_net uses the mark value 0x00 +# and good_service_net uses 0x20 +# +# acl normal_service_net src 10.0.0.0/24 +# acl good_service_net src 10.0.1.0/24 +# clientside_mark 0x00 normal_service_net +# clientside_mark 0x20 good_service_net +# +# Note: This feature is incompatible with qos_flows. Any mark values set here +# will be overwritten by mark values in qos_flows. +#Default: +# none + +# TAG: qos_flows +# Allows you to select a TOS/DSCP value to mark outgoing +# connections to the client, based on where the reply was sourced. +# For platforms using netfilter, allows you to set a netfilter mark +# value instead of, or in addition to, a TOS value. +# +# By default this functionality is disabled. To enable it with the default +# settings simply use "qos_flows mark" or "qos_flows tos". Default +# settings will result in the netfilter mark or TOS value being copied +# from the upstream connection to the client. Note that it is the connection +# CONNMARK value not the packet MARK value that is copied. +# +# It is not currently possible to copy the mark or TOS value from the +# client to the upstream connection request. +# +# TOS values really only have local significance - so you should +# know what you're specifying. For more information, see RFC2474, +# RFC2475, and RFC3260. +# +# The TOS/DSCP byte must be exactly that - a octet value 0 - 255. 
+# Note that only multiples of 4 are usable as the two rightmost bits have +# been redefined for use by ECN (RFC 3168 section 23.1). +# The squid parser will enforce this by masking away the ECN bits. +# +# Mark values can be any unsigned 32-bit integer value. +# +# This setting is configured by setting the following values: +# +# tos|mark Whether to set TOS or netfilter mark values +# +# local-hit=0xFF Value to mark local cache hits. +# +# sibling-hit=0xFF Value to mark hits from sibling peers. +# +# parent-hit=0xFF Value to mark hits from parent peers. +# +# miss=0xFF[/mask] Value to mark cache misses. Takes precedence +# over the preserve-miss feature (see below), unless +# mask is specified, in which case only the bits +# specified in the mask are written. +# +# The TOS variant of the following features are only possible on Linux +# and require your kernel to be patched with the TOS preserving ZPH +# patch, available from http://zph.bratcheda.org +# No patch is needed to preserve the netfilter mark, which will work +# with all variants of netfilter. +# +# disable-preserve-miss +# This option disables the preservation of the TOS or netfilter +# mark. By default, the existing TOS or netfilter mark value of +# the response coming from the remote server will be retained +# and masked with miss-mark. +# NOTE: in the case of a netfilter mark, the mark must be set on +# the connection (using the CONNMARK target) not on the packet +# (MARK target). +# +# miss-mask=0xFF +# Allows you to mask certain bits in the TOS or mark value +# received from the remote server, before copying the value to +# the TOS sent towards clients. +# Default for tos: 0xFF (TOS from server is not changed). +# Default for mark: 0xFFFFFFFF (mark from server is not changed). +# +# All of these features require the --enable-zph-qos compilation flag +# (enabled by default). Netfilter marking also requires the +# libnetfilter_conntrack libraries (--with-netfilter-conntrack) and +# libcap 2.09+ (--with-libcap). +# +#Default: +# none + +# TAG: tcp_outgoing_address +# Allows you to map requests to different outgoing IP addresses +# based on the username or source address of the user making +# the request. +# +# tcp_outgoing_address ipaddr [[!]aclname] ... +# +# For example; +# Forwarding clients with dedicated IPs for certain subnets. +# +# acl normal_service_net src 10.0.0.0/24 +# acl good_service_net src 10.0.2.0/24 +# +# tcp_outgoing_address 2001:db8::c001 good_service_net +# tcp_outgoing_address 10.1.0.2 good_service_net +# +# tcp_outgoing_address 2001:db8::beef normal_service_net +# tcp_outgoing_address 10.1.0.1 normal_service_net +# +# tcp_outgoing_address 2001:db8::1 +# tcp_outgoing_address 10.1.0.3 +# +# Processing proceeds in the order specified, and stops at first fully +# matching line. +# +# Squid will add an implicit IP version test to each line. +# Requests going to IPv4 websites will use the outgoing 10.1.0.* addresses. +# Requests going to IPv6 websites will use the outgoing 2001:db8:* addresses. +# +# +# NOTE: The use of this directive using client dependent ACLs is +# incompatible with the use of server side persistent connections. To +# ensure correct results it is best to set server_persistent_connections +# to off when using this directive in such configurations. +# +# NOTE: The use of this directive to set a local IP on outgoing TCP links +# is incompatible with using TPROXY to set client IP out outbound TCP links. 
+# When needing to contact peers use the no-tproxy cache_peer option and the +# client_dst_passthru directive re-enable normal forwarding such as this. +# +#Default: +# Address selection is performed by the operating system. + +# TAG: host_verify_strict +# Regardless of this option setting, when dealing with intercepted +# traffic, Squid always verifies that the destination IP address matches +# the Host header domain or IP (called 'authority form URL'). +# +# This enforcement is performed to satisfy a MUST-level requirement in +# RFC 2616 section 14.23: "The Host field value MUST represent the naming +# authority of the origin server or gateway given by the original URL". +# +# When set to ON: +# Squid always responds with an HTTP 409 (Conflict) error +# page and logs a security warning if there is no match. +# +# Squid verifies that the destination IP address matches +# the Host header for forward-proxy and reverse-proxy traffic +# as well. For those traffic types, Squid also enables the +# following checks, comparing the corresponding Host header +# and Request-URI components: +# +# * The host names (domain or IP) must be identical, +# but valueless or missing Host header disables all checks. +# For the two host names to match, both must be either IP +# or FQDN. +# +# * Port numbers must be identical, but if a port is missing +# the scheme-default port is assumed. +# +# +# When set to OFF (the default): +# Squid allows suspicious requests to continue but logs a +# security warning and blocks caching of the response. +# +# * Forward-proxy traffic is not checked at all. +# +# * Reverse-proxy traffic is not checked at all. +# +# * Intercepted traffic which passes verification is handled +# according to client_dst_passthru. +# +# * Intercepted requests which fail verification are sent +# to the client original destination instead of DIRECT. +# This overrides 'client_dst_passthru off'. +# +# For now suspicious intercepted CONNECT requests are always +# responded to with an HTTP 409 (Conflict) error page. +# +# +# SECURITY NOTE: +# +# As described in CVE-2009-0801 when the Host: header alone is used +# to determine the destination of a request it becomes trivial for +# malicious scripts on remote websites to bypass browser same-origin +# security policy and sandboxing protections. +# +# The cause of this is that such applets are allowed to perform their +# own HTTP stack, in which case the same-origin policy of the browser +# sandbox only verifies that the applet tries to contact the same IP +# as from where it was loaded at the IP level. The Host: header may +# be different from the connected IP and approved origin. +# +#Default: +# host_verify_strict off + +# TAG: client_dst_passthru +# With NAT or TPROXY intercepted traffic Squid may pass the request +# directly to the original client destination IP or seek a faster +# source using the HTTP Host header. +# +# Using Host to locate alternative servers can provide faster +# connectivity with a range of failure recovery options. +# But can also lead to connectivity trouble when the client and +# server are attempting stateful interactions unaware of the proxy. +# +# This option (on by default) prevents alternative DNS entries being +# located to send intercepted traffic DIRECT to an origin server. +# The clients original destination IP and port will be used instead. 
+# +# Regardless of this option setting, when dealing with intercepted +# traffic Squid will verify the Host: header and any traffic which +# fails Host verification will be treated as if this option were ON. +# +# see host_verify_strict for details on the verification process. +#Default: +# client_dst_passthru on + +# SSL OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: ssl_unclean_shutdown +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Some browsers (especially MSIE) bugs out on SSL shutdown +# messages. +#Default: +# ssl_unclean_shutdown off + +# TAG: ssl_engine +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# The OpenSSL engine to use. You will need to set this if you +# would like to use hardware SSL acceleration for example. +#Default: +# none + +# TAG: sslproxy_client_certificate +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Client SSL Certificate to use when proxying https:// URLs +#Default: +# none + +# TAG: sslproxy_client_key +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Client SSL Key to use when proxying https:// URLs +#Default: +# none + +# TAG: sslproxy_version +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# SSL version level to use when proxying https:// URLs +# +# The versions of SSL/TLS supported: +# +# 1 automatic (default) +# 2 SSLv2 only +# 3 SSLv3 only +# 4 TLSv1.0 only +# 5 TLSv1.1 only +# 6 TLSv1.2 only +#Default: +# automatic SSL/TLS version negotiation + +# TAG: sslproxy_options +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Colon (:) or comma (,) separated list of SSL implementation options +# to use when proxying https:// URLs +# +# The most important being: +# +# NO_SSLv2 Disallow the use of SSLv2 +# NO_SSLv3 Disallow the use of SSLv3 +# NO_TLSv1 Disallow the use of TLSv1.0 +# NO_TLSv1_1 Disallow the use of TLSv1.1 +# NO_TLSv1_2 Disallow the use of TLSv1.2 +# +# SINGLE_DH_USE +# Always create a new key when using temporary/ephemeral +# DH key exchanges +# +# NO_TICKET +# Disable use of RFC5077 session tickets. Some servers +# may have problems understanding the TLS extension due +# to ambiguous specification in RFC4507. +# +# ALL Enable various bug workarounds suggested as "harmless" +# by OpenSSL. Be warned that this may reduce SSL/TLS +# strength to some attacks. +# +# See the OpenSSL SSL_CTX_set_options documentation for a +# complete list of possible options. +# +# WARNING: This directive takes a single token. If a space is used +# the value(s) after that space are SILENTLY IGNORED. +#Default: +# none + +# TAG: sslproxy_cipher +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# SSL cipher list to use when proxying https:// URLs +# +# Colon separated list of supported ciphers. 
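+#
+## Example cipher list (illustrative only; choose ciphers to match
+## your own security policy and client population):
+##sslproxy_cipher HIGH:MEDIUM:!aNULL:!eNULL:!RC4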
+#Default: +# none + +# TAG: sslproxy_cafile +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# file containing CA certificates to use when verifying server +# certificates while proxying https:// URLs +#Default: +# none + +# TAG: sslproxy_capath +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# directory containing CA certificates to use when verifying +# server certificates while proxying https:// URLs +#Default: +# none + +# TAG: sslproxy_session_ttl +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Sets the timeout value for SSL sessions +#Default: +# sslproxy_session_ttl 300 + +# TAG: sslproxy_session_cache_size +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Sets the cache size to use for ssl session +#Default: +# sslproxy_session_cache_size 2 MB + +# TAG: sslproxy_foreign_intermediate_certs +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Many origin servers fail to send their full server certificate +# chain for verification, assuming the client already has or can +# easily locate any missing intermediate certificates. +# +# Squid uses the certificates from the specified file to fill in +# these missing chains when trying to validate origin server +# certificate chains. +# +# The file is expected to contain zero or more PEM-encoded +# intermediate certificates. These certificates are not treated +# as trusted root certificates, and any self-signed certificate in +# this file will be ignored. +#Default: +# none + +# TAG: sslproxy_cert_sign_hash +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Sets the hashing algorithm to use when signing generated certificates. +# Valid algorithm names depend on the OpenSSL library used. The following +# names are usually available: sha1, sha256, sha512, and md5. Please see +# your OpenSSL library manual for the available hashes. By default, Squids +# that support this option use sha256 hashes. +# +# Squid does not forcefully purge cached certificates that were generated +# with an algorithm other than the currently configured one. They remain +# in the cache, subject to the regular cache eviction policy, and become +# useful if the algorithm changes again. +#Default: +# none + +# TAG: ssl_bump +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# This option is consulted when a CONNECT request is received on +# an http_port (or a new connection is intercepted at an +# https_port), provided that port was configured with an ssl-bump +# flag. The subsequent data on the connection is either treated as +# HTTPS and decrypted OR tunneled at TCP level without decryption, +# depending on the first matching bumping "action". +# +# ssl_bump [!]acl ... +# +# The following bumping actions are currently supported: +# +# splice +# Become a TCP tunnel without decrypting proxied traffic. +# This is the default action. +# +# bump +# When used on step SslBump1, establishes a secure connection +# with the client first, then connect to the server. +# When used on step SslBump2 or SslBump3, establishes a secure +# connection with the server and, using a mimicked server +# certificate, with the client. +# +# peek +# Receive client (step SslBump1) or server (step SslBump2) +# certificate while preserving the possibility of splicing the +# connection. 
Peeking at the server certificate (during step 2) +# usually precludes bumping of the connection at step 3. +# +# stare +# Receive client (step SslBump1) or server (step SslBump2) +# certificate while preserving the possibility of bumping the +# connection. Staring at the server certificate (during step 2) +# usually precludes splicing of the connection at step 3. +# +# terminate +# Close client and server connections. +# +# Backward compatibility actions available at step SslBump1: +# +# client-first +# Bump the connection. Establish a secure connection with the +# client first, then connect to the server. This old mode does +# not allow Squid to mimic server SSL certificate and does not +# work with intercepted SSL connections. +# +# server-first +# Bump the connection. Establish a secure connection with the +# server first, then establish a secure connection with the +# client, using a mimicked server certificate. Works with both +# CONNECT requests and intercepted SSL connections, but does +# not allow to make decisions based on SSL handshake info. +# +# peek-and-splice +# Decide whether to bump or splice the connection based on +# client-to-squid and server-to-squid SSL hello messages. +# XXX: Remove. +# +# none +# Same as the "splice" action. +# +# All ssl_bump rules are evaluated at each of the supported bumping +# steps. Rules with actions that are impossible at the current step are +# ignored. The first matching ssl_bump action wins and is applied at the +# end of the current step. If no rules match, the splice action is used. +# See the at_step ACL for a list of the supported SslBump steps. +# +# This clause supports both fast and slow acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +# See also: http_port ssl-bump, https_port ssl-bump, and acl at_step. +# +# +# # Example: Bump all TLS connections except those originating from +# # localhost or those going to example.com. +# +# acl broken_sites ssl::server_name .example.com +# ssl_bump splice localhost +# ssl_bump splice broken_sites +# ssl_bump bump all +#Default: +# Become a TCP tunnel without decrypting proxied traffic. + +# TAG: sslproxy_flags +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Various flags modifying the use of SSL while proxying https:// URLs: +# DONT_VERIFY_PEER Accept certificates that fail verification. +# For refined control, see sslproxy_cert_error. +# NO_DEFAULT_CA Don't use the default CA list built in +# to OpenSSL. +#Default: +# none + +# TAG: sslproxy_cert_error +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Use this ACL to bypass server certificate validation errors. +# +# For example, the following lines will bypass all validation errors +# when talking to servers for example.com. All other +# validation errors will result in ERR_SECURE_CONNECT_FAIL error. +# +# acl BrokenButTrustedServers dstdomain example.com +# sslproxy_cert_error allow BrokenButTrustedServers +# sslproxy_cert_error deny all +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# Using slow acl types may result in server crashes +# +# Without this option, all server certificate validation errors +# terminate the transaction to protect Squid and the client. +# +# SQUID_X509_V_ERR_INFINITE_VALIDATION error cannot be bypassed +# but should not happen unless your OpenSSL library is buggy. 
+# +# SECURITY WARNING: +# Bypassing validation errors is dangerous because an +# error usually implies that the server cannot be trusted +# and the connection may be insecure. +# +# See also: sslproxy_flags and DONT_VERIFY_PEER. +#Default: +# Server certificate errors terminate the transaction. + +# TAG: sslproxy_cert_sign +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# +# sslproxy_cert_sign acl ... +# +# The following certificate signing algorithms are supported: +# +# signTrusted +# Sign using the configured CA certificate which is usually +# placed in and trusted by end-user browsers. This is the +# default for trusted origin server certificates. +# +# signUntrusted +# Sign to guarantee an X509_V_ERR_CERT_UNTRUSTED browser error. +# This is the default for untrusted origin server certificates +# that are not self-signed (see ssl::certUntrusted). +# +# signSelf +# Sign using a self-signed certificate with the right CN to +# generate a X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT error in the +# browser. This is the default for self-signed origin server +# certificates (see ssl::certSelfSigned). +# +# This clause only supports fast acl types. +# +# When sslproxy_cert_sign acl(s) match, Squid uses the corresponding +# signing algorithm to generate the certificate and ignores all +# subsequent sslproxy_cert_sign options (the first match wins). If no +# acl(s) match, the default signing algorithm is determined by errors +# detected when obtaining and validating the origin server certificate. +# +# WARNING: SQUID_X509_V_ERR_DOMAIN_MISMATCH and ssl:certDomainMismatch can +# be used with sslproxy_cert_adapt, but if and only if Squid is bumping a +# CONNECT request that carries a domain name. In all other cases (CONNECT +# to an IP address or an intercepted SSL connection), Squid cannot detect +# the domain mismatch at certificate generation time when +# bump-server-first is used. +#Default: +# none + +# TAG: sslproxy_cert_adapt +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# +# sslproxy_cert_adapt acl ... +# +# The following certificate adaptation algorithms are supported: +# +# setValidAfter +# Sets the "Not After" property to the "Not After" property of +# the CA certificate used to sign generated certificates. +# +# setValidBefore +# Sets the "Not Before" property to the "Not Before" property of +# the CA certificate used to sign generated certificates. +# +# setCommonName or setCommonName{CN} +# Sets Subject.CN property to the host name specified as a +# CN parameter or, if no explicit CN parameter was specified, +# extracted from the CONNECT request. It is a misconfiguration +# to use setCommonName without an explicit parameter for +# intercepted or tproxied SSL connections. +# +# This clause only supports fast acl types. +# +# Squid first groups sslproxy_cert_adapt options by adaptation algorithm. +# Within a group, when sslproxy_cert_adapt acl(s) match, Squid uses the +# corresponding adaptation algorithm to generate the certificate and +# ignores all subsequent sslproxy_cert_adapt options in that algorithm's +# group (i.e., the first match wins within each algorithm group). If no +# acl(s) match, the default mimicking action takes place. +# +# WARNING: SQUID_X509_V_ERR_DOMAIN_MISMATCH and ssl:certDomainMismatch can +# be used with sslproxy_cert_adapt, but if and only if Squid is bumping a +# CONNECT request that carries a domain name. 
In all other cases (CONNECT +# to an IP address or an intercepted SSL connection), Squid cannot detect +# the domain mismatch at certificate generation time when +# bump-server-first is used. +#Default: +# none + +# TAG: sslpassword_program +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Specify a program used for entering SSL key passphrases +# when using encrypted SSL certificate keys. If not specified +# keys must either be unencrypted, or Squid started with the -N +# option to allow it to query interactively for the passphrase. +# +# The key file name is given as argument to the program allowing +# selection of the right password if you have multiple encrypted +# keys. +#Default: +# none + +# OPTIONS RELATING TO EXTERNAL SSL_CRTD +# ----------------------------------------------------------------------------- + +# TAG: sslcrtd_program +# Note: This option is only available if Squid is rebuilt with the +# --enable-ssl-crtd +# +# Specify the location and options of the executable for ssl_crtd process. +# /usr/lib/squid/ssl_crtd program requires -s and -M parameters +# For more information use: +# /usr/lib/squid/ssl_crtd -h +#Default: +# sslcrtd_program /usr/lib/squid/ssl_crtd -s /var/lib/ssl_db -M 4MB + +# TAG: sslcrtd_children +# Note: This option is only available if Squid is rebuilt with the +# --enable-ssl-crtd +# +# The maximum number of processes spawn to service ssl server. +# The maximum this may be safely set to is 32. +# +# The startup= and idle= options allow some measure of skew in your +# tuning. +# +# startup=N +# +# Sets the minimum number of processes to spawn when Squid +# starts or reconfigures. When set to zero the first request will +# cause spawning of the first child process to handle it. +# +# Starting too few children temporary slows Squid under load while it +# tries to spawn enough additional processes to cope with traffic. +# +# idle=N +# +# Sets a minimum of how many processes Squid is to try and keep available +# at all times. When traffic begins to rise above what the existing +# processes can handle this many more will be spawned up to the maximum +# configured. A minimum setting of 1 is required. +# +# You must have at least one ssl_crtd process. +#Default: +# sslcrtd_children 32 startup=5 idle=1 + +# TAG: sslcrtvalidator_program +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# Specify the location and options of the executable for ssl_crt_validator +# process. +# +# Usage: sslcrtvalidator_program [ttl=n] [cache=n] path ... +# +# Options: +# ttl=n TTL in seconds for cached results. The default is 60 secs +# cache=n limit the result cache size. The default value is 2048 +#Default: +# none + +# TAG: sslcrtvalidator_children +# Note: This option is only available if Squid is rebuilt with the +# --with-openssl +# +# The maximum number of processes spawn to service SSL server. +# The maximum this may be safely set to is 32. +# +# The startup= and idle= options allow some measure of skew in your +# tuning. +# +# startup=N +# +# Sets the minimum number of processes to spawn when Squid +# starts or reconfigures. When set to zero the first request will +# cause spawning of the first child process to handle it. +# +# Starting too few children temporary slows Squid under load while it +# tries to spawn enough additional processes to cope with traffic. +# +# idle=N +# +# Sets a minimum of how many processes Squid is to try and keep available +# at all times. 
When traffic begins to rise above what the existing +# processes can handle this many more will be spawned up to the maximum +# configured. A minimum setting of 1 is required. +# +# concurrency= +# +# The number of requests each certificate validator helper can handle in +# parallel. A value of 0 indicates the certficate validator does not +# support concurrency. Defaults to 1. +# +# When this directive is set to a value >= 1 then the protocol +# used to communicate with the helper is modified to include +# a request ID in front of the request/response. The request +# ID from the request must be echoed back with the response +# to that request. +# +# You must have at least one ssl_crt_validator process. +#Default: +# sslcrtvalidator_children 32 startup=5 idle=1 concurrency=1 + +# OPTIONS WHICH AFFECT THE NEIGHBOR SELECTION ALGORITHM +# ----------------------------------------------------------------------------- + +# TAG: cache_peer +# To specify other caches in a hierarchy, use the format: +# +# cache_peer hostname type http-port icp-port [options] +# +# For example, +# +# # proxy icp +# # hostname type port port options +# # -------------------- -------- ----- ----- ----------- +# cache_peer parent.foo.net parent 3128 3130 default +# cache_peer sib1.foo.net sibling 3128 3130 proxy-only +# cache_peer sib2.foo.net sibling 3128 3130 proxy-only +# cache_peer example.com parent 80 0 default +# cache_peer cdn.example.com sibling 3128 0 +# +# type: either 'parent', 'sibling', or 'multicast'. +# +# proxy-port: The port number where the peer accept HTTP requests. +# For other Squid proxies this is usually 3128 +# For web servers this is usually 80 +# +# icp-port: Used for querying neighbor caches about objects. +# Set to 0 if the peer does not support ICP or HTCP. +# See ICP and HTCP options below for additional details. +# +# +# ==== ICP OPTIONS ==== +# +# You MUST also set icp_port and icp_access explicitly when using these options. +# The defaults will prevent peer traffic using ICP. +# +# +# no-query Disable ICP queries to this neighbor. +# +# multicast-responder +# Indicates the named peer is a member of a multicast group. +# ICP queries will not be sent directly to the peer, but ICP +# replies will be accepted from it. +# +# closest-only Indicates that, for ICP_OP_MISS replies, we'll only forward +# CLOSEST_PARENT_MISSes and never FIRST_PARENT_MISSes. +# +# background-ping +# To only send ICP queries to this neighbor infrequently. +# This is used to keep the neighbor round trip time updated +# and is usually used in conjunction with weighted-round-robin. +# +# +# ==== HTCP OPTIONS ==== +# +# You MUST also set htcp_port and htcp_access explicitly when using these options. +# The defaults will prevent peer traffic using HTCP. +# +# +# htcp Send HTCP, instead of ICP, queries to the neighbor. +# You probably also want to set the "icp-port" to 4827 +# instead of 3130. This directive accepts a comma separated +# list of options described below. +# +# htcp=oldsquid Send HTCP to old Squid versions (2.5 or earlier). +# +# htcp=no-clr Send HTCP to the neighbor but without +# sending any CLR requests. This cannot be used with +# only-clr. +# +# htcp=only-clr Send HTCP to the neighbor but ONLY CLR requests. +# This cannot be used with no-clr. +# +# htcp=no-purge-clr +# Send HTCP to the neighbor including CLRs but only when +# they do not result from PURGE requests. +# +# htcp=forward-clr +# Forward any HTCP CLR requests this proxy receives to the peer. 
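+#
+# For example, a parent queried with HTCP instead of ICP (the host
+# name below is a placeholder) could be configured as:
+#
+# cache_peer parent.example.com parent 3128 4827 htcp
+#
+# Remember that htcp_port and htcp_access must also be configured for
+# HTCP peering to work.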
+# +# +# ==== PEER SELECTION METHODS ==== +# +# The default peer selection method is ICP, with the first responding peer +# being used as source. These options can be used for better load balancing. +# +# +# default This is a parent cache which can be used as a "last-resort" +# if a peer cannot be located by any of the peer-selection methods. +# If specified more than once, only the first is used. +# +# round-robin Load-Balance parents which should be used in a round-robin +# fashion in the absence of any ICP queries. +# weight=N can be used to add bias. +# +# weighted-round-robin +# Load-Balance parents which should be used in a round-robin +# fashion with the frequency of each parent being based on the +# round trip time. Closer parents are used more often. +# Usually used for background-ping parents. +# weight=N can be used to add bias. +# +# carp Load-Balance parents which should be used as a CARP array. +# The requests will be distributed among the parents based on the +# CARP load balancing hash function based on their weight. +# +# userhash Load-balance parents based on the client proxy_auth or ident username. +# +# sourcehash Load-balance parents based on the client source IP. +# +# multicast-siblings +# To be used only for cache peers of type "multicast". +# ALL members of this multicast group have "sibling" +# relationship with it, not "parent". This is to a multicast +# group when the requested object would be fetched only from +# a "parent" cache, anyway. It's useful, e.g., when +# configuring a pool of redundant Squid proxies, being +# members of the same multicast group. +# +# +# ==== PEER SELECTION OPTIONS ==== +# +# weight=N use to affect the selection of a peer during any weighted +# peer-selection mechanisms. +# The weight must be an integer; default is 1, +# larger weights are favored more. +# This option does not affect parent selection if a peering +# protocol is not in use. +# +# basetime=N Specify a base amount to be subtracted from round trip +# times of parents. +# It is subtracted before division by weight in calculating +# which parent to fectch from. If the rtt is less than the +# base time the rtt is set to a minimal value. +# +# ttl=N Specify a TTL to use when sending multicast ICP queries +# to this address. +# Only useful when sending to a multicast group. +# Because we don't accept ICP replies from random +# hosts, you must configure other group members as +# peers with the 'multicast-responder' option. +# +# no-delay To prevent access to this neighbor from influencing the +# delay pools. +# +# digest-url=URL Tell Squid to fetch the cache digest (if digests are +# enabled) for this host from the specified URL rather +# than the Squid default location. +# +# +# ==== CARP OPTIONS ==== +# +# carp-key=key-specification +# use a different key than the full URL to hash against the peer. +# the key-specification is a comma-separated list of the keywords +# scheme, host, port, path, params +# Order is not important. +# +# ==== ACCELERATOR / REVERSE-PROXY OPTIONS ==== +# +# originserver Causes this parent to be contacted as an origin server. +# Meant to be used in accelerator setups when the peer +# is a web server. +# +# forceddomain=name +# Set the Host header of requests forwarded to this peer. +# Useful in accelerator setups where the server (peer) +# expects a certain domain name but clients may request +# others. ie example.com or www.example.com +# +# no-digest Disable request of cache digests. 
+# +# no-netdb-exchange +# Disables requesting ICMP RTT database (NetDB). +# +# +# ==== AUTHENTICATION OPTIONS ==== +# +# login=user:password +# If this is a personal/workgroup proxy and your parent +# requires proxy authentication. +# +# Note: The string can include URL escapes (i.e. %20 for +# spaces). This also means % must be written as %%. +# +# login=PASSTHRU +# Send login details received from client to this peer. +# Both Proxy- and WWW-Authorization headers are passed +# without alteration to the peer. +# Authentication is not required by Squid for this to work. +# +# Note: This will pass any form of authentication but +# only Basic auth will work through a proxy unless the +# connection-auth options are also used. +# +# login=PASS Send login details received from client to this peer. +# Authentication is not required by this option. +# +# If there are no client-provided authentication headers +# to pass on, but username and password are available +# from an external ACL user= and password= result tags +# they may be sent instead. +# +# Note: To combine this with proxy_auth both proxies must +# share the same user database as HTTP only allows for +# a single login (one for proxy, one for origin server). +# Also be warned this will expose your users proxy +# password to the peer. USE WITH CAUTION +# +# login=*:password +# Send the username to the upstream cache, but with a +# fixed password. This is meant to be used when the peer +# is in another administrative domain, but it is still +# needed to identify each user. +# The star can optionally be followed by some extra +# information which is added to the username. This can +# be used to identify this proxy to the peer, similar to +# the login=username:password option above. +# +# login=NEGOTIATE +# If this is a personal/workgroup proxy and your parent +# requires a secure proxy authentication. +# The first principal from the default keytab or defined by +# the environment variable KRB5_KTNAME will be used. +# +# WARNING: The connection may transmit requests from multiple +# clients. Negotiate often assumes end-to-end authentication +# and a single-client. Which is not strictly true here. +# +# login=NEGOTIATE:principal_name +# If this is a personal/workgroup proxy and your parent +# requires a secure proxy authentication. +# The principal principal_name from the default keytab or +# defined by the environment variable KRB5_KTNAME will be +# used. +# +# WARNING: The connection may transmit requests from multiple +# clients. Negotiate often assumes end-to-end authentication +# and a single-client. Which is not strictly true here. +# +# connection-auth=on|off +# Tell Squid that this peer does or not support Microsoft +# connection oriented authentication, and any such +# challenges received from there should be ignored. +# Default is auto to automatically determine the status +# of the peer. +# +# +# ==== SSL / HTTPS / TLS OPTIONS ==== +# +# ssl Encrypt connections to this peer with SSL/TLS. +# +# sslcert=/path/to/ssl/certificate +# A client SSL certificate to use when connecting to +# this peer. +# +# sslkey=/path/to/ssl/key +# The private SSL key corresponding to sslcert above. +# If 'sslkey' is not specified 'sslcert' is assumed to +# reference a combined file containing both the +# certificate and the key. 
+# +# Notes: +# +# On Debian/Ubuntu systems a default snakeoil certificate is +# available in /etc/ssl and users can set: +# +# cert=/etc/ssl/certs/ssl-cert-snakeoil.pem +# +# and +# +# key=/etc/ssl/private/ssl-cert-snakeoil.key +# +# for testing. +# +# sslversion=1|2|3|4|5|6 +# The SSL version to use when connecting to this peer +# 1 = automatic (default) +# 2 = SSL v2 only +# 3 = SSL v3 only +# 4 = TLS v1.0 only +# 5 = TLS v1.1 only +# 6 = TLS v1.2 only +# +# sslcipher=... The list of valid SSL ciphers to use when connecting +# to this peer. +# +# ssloptions=... Specify various SSL implementation options: +# +# NO_SSLv2 Disallow the use of SSLv2 +# NO_SSLv3 Disallow the use of SSLv3 +# NO_TLSv1 Disallow the use of TLSv1.0 +# NO_TLSv1_1 Disallow the use of TLSv1.1 +# NO_TLSv1_2 Disallow the use of TLSv1.2 +# +# SINGLE_DH_USE +# Always create a new key when using +# temporary/ephemeral DH key exchanges +# +# NO_TICKET +# Disable use of RFC5077 session tickets. Some servers +# may have problems understanding the TLS extension due +# to ambiguous specification in RFC4507. +# +# ALL Enable various bug workarounds +# suggested as "harmless" by OpenSSL +# Be warned that this reduces SSL/TLS +# strength to some attacks. +# +# See the OpenSSL SSL_CTX_set_options documentation for a +# more complete list. +# +# sslcafile=... A file containing additional CA certificates to use +# when verifying the peer certificate. +# +# sslcapath=... A directory containing additional CA certificates to +# use when verifying the peer certificate. +# +# sslcrlfile=... A certificate revocation list file to use when +# verifying the peer certificate. +# +# sslflags=... Specify various flags modifying the SSL implementation: +# +# DONT_VERIFY_PEER +# Accept certificates even if they fail to +# verify. +# NO_DEFAULT_CA +# Don't use the default CA list built in +# to OpenSSL. +# DONT_VERIFY_DOMAIN +# Don't verify the peer certificate +# matches the server name +# +# ssldomain= The peer name as advertised in it's certificate. +# Used for verifying the correctness of the received peer +# certificate. If not specified the peer hostname will be +# used. +# +# front-end-https +# Enable the "Front-End-Https: On" header needed when +# using Squid as a SSL frontend in front of Microsoft OWA. +# See MS KB document Q307347 for details on this header. +# If set to auto the header will only be added if the +# request is forwarded as a https:// URL. +# +# +# ==== GENERAL OPTIONS ==== +# +# connect-timeout=N +# A peer-specific connect timeout. +# Also see the peer_connect_timeout directive. +# +# connect-fail-limit=N +# How many times connecting to a peer must fail before +# it is marked as down. Standby connection failures +# count towards this limit. Default is 10. +# +# allow-miss Disable Squid's use of only-if-cached when forwarding +# requests to siblings. This is primarily useful when +# icp_hit_stale is used by the sibling. Excessive use +# of this option may result in forwarding loops. One way +# to prevent peering loops when using this option, is to +# deny cache peer usage on requests from a peer: +# acl fromPeer ... +# cache_peer_access peerName deny fromPeer +# +# max-conn=N Limit the number of concurrent connections the Squid +# may open to this peer, including already opened idle +# and standby connections. There is no peer-specific +# connection limit by default. +# +# A peer exceeding the limit is not used for new +# requests unless a standby connection is available. 
+# +# max-conn currently works poorly with idle persistent +# connections: When a peer reaches its max-conn limit, +# and there are idle persistent connections to the peer, +# the peer may not be selected because the limiting code +# does not know whether Squid can reuse those idle +# connections. +# +# standby=N Maintain a pool of N "hot standby" connections to an +# UP peer, available for requests when no idle +# persistent connection is available (or safe) to use. +# By default and with zero N, no such pool is maintained. +# N must not exceed the max-conn limit (if any). +# +# At start or after reconfiguration, Squid opens new TCP +# standby connections until there are N connections +# available and then replenishes the standby pool as +# opened connections are used up for requests. A used +# connection never goes back to the standby pool, but +# may go to the regular idle persistent connection pool +# shared by all peers and origin servers. +# +# Squid never opens multiple new standby connections +# concurrently. This one-at-a-time approach minimizes +# flooding-like effect on peers. Furthermore, just a few +# standby connections should be sufficient in most cases +# to supply most new requests with a ready-to-use +# connection. +# +# Standby connections obey server_idle_pconn_timeout. +# For the feature to work as intended, the peer must be +# configured to accept and keep them open longer than +# the idle timeout at the connecting Squid, to minimize +# race conditions typical to idle used persistent +# connections. Default request_timeout and +# server_idle_pconn_timeout values ensure such a +# configuration. +# +# name=xxx Unique name for the peer. +# Required if you have multiple peers on the same host +# but different ports. +# This name can be used in cache_peer_access and similar +# directives to identify the peer. +# Can be used by outgoing access controls through the +# peername ACL type. +# +# no-tproxy Do not use the client-spoof TPROXY support when forwarding +# requests to this peer. Use normal address selection instead. +# This overrides the spoof_client_ip ACL. +# +# proxy-only objects fetched from the peer will not be stored locally. +# +#Default: +# none + +# TAG: cache_peer_domain +# Use to limit the domains for which a neighbor cache will be +# queried. +# +# Usage: +# cache_peer_domain cache-host domain [domain ...] +# cache_peer_domain cache-host !domain +# +# For example, specifying +# +# cache_peer_domain parent.foo.net .edu +# +# has the effect such that UDP query packets are sent to +# 'bigserver' only when the requested object exists on a +# server in the .edu domain. Prefixing the domainname +# with '!' means the cache will be queried for objects +# NOT in that domain. +# +# NOTE: * Any number of domains may be given for a cache-host, +# either on the same or separate lines. +# * When multiple domains are given for a particular +# cache-host, the first matched domain is applied. +# * Cache hosts with no domain restrictions are queried +# for all requests. +# * There are no defaults. +# * There is also a 'cache_peer_access' tag in the ACL +# section. +#Default: +# none + +# TAG: cache_peer_access +# Restricts usage of cache_peer proxies. +# +# Usage: +# cache_peer_access peer-name allow|deny [!]aclname ... +# +# For the required peer-name parameter, use either the value of the +# cache_peer name=value parameter or, if name=value is missing, the +# cache_peer hostname parameter. 
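+#
+#	For example (illustrative only; the peer name and ACL below are
+#	placeholders and must match your own cache_peer and acl lines):
+#
+#		acl intranet dstdomain .intranet.example.com
+#		cache_peer_access myparent deny intranet
+#		cache_peer_access myparent allow all
+#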
+# +# This directive narrows down the selection of peering candidates, but +# does not determine the order in which the selected candidates are +# contacted. That order is determined by the peer selection algorithms +# (see PEER SELECTION sections in the cache_peer documentation). +# +# If a deny rule matches, the corresponding peer will not be contacted +# for the current transaction -- Squid will not send ICP queries and +# will not forward HTTP requests to that peer. An allow match leaves +# the corresponding peer in the selection. The first match for a given +# peer wins for that peer. +# +# The relative order of cache_peer_access directives for the same peer +# matters. The relative order of any two cache_peer_access directives +# for different peers does not matter. To ease interpretation, it is a +# good idea to group cache_peer_access directives for the same peer +# together. +# +# A single cache_peer_access directive may be evaluated multiple times +# for a given transaction because individual peer selection algorithms +# may check it independently from each other. These redundant checks +# may be optimized away in future Squid versions. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# No peer usage restrictions. + +# TAG: neighbor_type_domain +# Modify the cache_peer neighbor type when passing requests +# about specific domains to the peer. +# +# Usage: +# neighbor_type_domain neighbor parent|sibling domain domain ... +# +# For example: +# cache_peer foo.example.com parent 3128 3130 +# neighbor_type_domain foo.example.com sibling .au .de +# +# The above configuration treats all requests to foo.example.com as a +# parent proxy unless the request is for a .au or .de ccTLD domain name. +#Default: +# The peer type from cache_peer directive is used for all requests to that peer. + +# TAG: dead_peer_timeout (seconds) +# This controls how long Squid waits to declare a peer cache +# as "dead." If there are no ICP replies received in this +# amount of time, Squid will declare the peer dead and not +# expect to receive any further ICP replies. However, it +# continues to send ICP queries, and will mark the peer as +# alive upon receipt of the first subsequent ICP reply. +# +# This timeout also affects when Squid expects to receive ICP +# replies from peers. If more than 'dead_peer' seconds have +# passed since the last ICP reply was received, Squid will not +# expect to receive an ICP reply on the next query. Thus, if +# your time between requests is greater than this timeout, you +# will see a lot of requests sent DIRECT to origin servers +# instead of to your parents. +#Default: +# dead_peer_timeout 10 seconds + +# TAG: forward_max_tries +# Controls how many different forward paths Squid will try +# before giving up. See also forward_timeout. +# +# NOTE: connect_retries (default: none) can make each of these +# possible forwarding paths be tried multiple times. +#Default: +# forward_max_tries 25 + +# MEMORY CACHE OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: cache_mem (bytes) +# NOTE: THIS PARAMETER DOES NOT SPECIFY THE MAXIMUM PROCESS SIZE. +# IT ONLY PLACES A LIMIT ON HOW MUCH ADDITIONAL MEMORY SQUID WILL +# USE AS A MEMORY CACHE OF OBJECTS. SQUID USES MEMORY FOR OTHER +# THINGS AS WELL. SEE THE SQUID FAQ SECTION 8 FOR DETAILS. 
+# +# 'cache_mem' specifies the ideal amount of memory to be used +# for: +# * In-Transit objects +# * Hot Objects +# * Negative-Cached objects +# +# Data for these objects are stored in 4 KB blocks. This +# parameter specifies the ideal upper limit on the total size of +# 4 KB blocks allocated. In-Transit objects take the highest +# priority. +# +# In-transit objects have priority over the others. When +# additional space is needed for incoming data, negative-cached +# and hot objects will be released. In other words, the +# negative-cached and hot objects will fill up any unused space +# not needed for in-transit objects. +# +# If circumstances require, this limit will be exceeded. +# Specifically, if your incoming request rate requires more than +# 'cache_mem' of memory to hold in-transit objects, Squid will +# exceed this limit to satisfy the new requests. When the load +# decreases, blocks will be freed until the high-water mark is +# reached. Thereafter, blocks will be used to store hot +# objects. +# +# If shared memory caching is enabled, Squid does not use the shared +# cache space for in-transit objects, but they still consume as much +# local memory as they need. For more details about the shared memory +# cache, see memory_cache_shared. +#Default: +# cache_mem 256 MB + +# TAG: maximum_object_size_in_memory (bytes) +# Objects greater than this size will not be attempted to kept in +# the memory cache. This should be set high enough to keep objects +# accessed frequently in memory to improve performance whilst low +# enough to keep larger objects from hoarding cache_mem. +#Default: +# maximum_object_size_in_memory 512 KB + +# TAG: memory_cache_shared on|off +# Controls whether the memory cache is shared among SMP workers. +# +# The shared memory cache is meant to occupy cache_mem bytes and replace +# the non-shared memory cache, although some entities may still be +# cached locally by workers for now (e.g., internal and in-transit +# objects may be served from a local memory cache even if shared memory +# caching is enabled). +# +# By default, the memory cache is shared if and only if all of the +# following conditions are satisfied: Squid runs in SMP mode with +# multiple workers, cache_mem is positive, and Squid environment +# supports required IPC primitives (e.g., POSIX shared memory segments +# and GCC-style atomic operations). +# +# To avoid blocking locks, shared memory uses opportunistic algorithms +# that do not guarantee that every cachable entity that could have been +# shared among SMP workers will actually be shared. +#Default: +# "on" where supported if doing memory caching with multiple SMP workers. + +# TAG: memory_cache_mode +# Controls which objects to keep in the memory cache (cache_mem) +# +# always Keep most recently fetched objects in memory (default) +# +# disk Only disk cache hits are kept in memory, which means +# an object must first be cached on disk and then hit +# a second time before cached in memory. +# +# network Only objects fetched from network is kept in memory +#Default: +# Keep the most recently fetched objects in memory + +# TAG: memory_replacement_policy +# The memory replacement policy parameter determines which +# objects are purged from memory when memory space is needed. +# +# See cache_replacement_policy for details on algorithms. 
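+#
+#	Example (illustrative; any policy listed under
+#	cache_replacement_policy may be used here):
+#		memory_replacement_policy heap GDSF
+#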
+#Default: +# memory_replacement_policy lru + +# DISK CACHE OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: cache_replacement_policy +# The cache replacement policy parameter determines which +# objects are evicted (replaced) when disk space is needed. +# +# lru : Squid's original list based LRU policy +# heap GDSF : Greedy-Dual Size Frequency +# heap LFUDA: Least Frequently Used with Dynamic Aging +# heap LRU : LRU policy implemented using a heap +# +# Applies to any cache_dir lines listed below this directive. +# +# The LRU policies keeps recently referenced objects. +# +# The heap GDSF policy optimizes object hit rate by keeping smaller +# popular objects in cache so it has a better chance of getting a +# hit. It achieves a lower byte hit rate than LFUDA though since +# it evicts larger (possibly popular) objects. +# +# The heap LFUDA policy keeps popular objects in cache regardless of +# their size and thus optimizes byte hit rate at the expense of +# hit rate since one large, popular object will prevent many +# smaller, slightly less popular objects from being cached. +# +# Both policies utilize a dynamic aging mechanism that prevents +# cache pollution that can otherwise occur with frequency-based +# replacement policies. +# +# NOTE: if using the LFUDA replacement policy you should increase +# the value of maximum_object_size above its default of 4 MB to +# to maximize the potential byte hit rate improvement of LFUDA. +# +# For more information about the GDSF and LFUDA cache replacement +# policies see http://www.hpl.hp.com/techreports/1999/HPL-1999-69.html +# and http://fog.hpl.external.hp.com/techreports/98/HPL-98-173.html. +#Default: +# cache_replacement_policy lru + +# TAG: minimum_object_size (bytes) +# Objects smaller than this size will NOT be saved on disk. The +# value is specified in bytes, and the default is 0 KB, which +# means all responses can be stored. +#Default: +# no limit + +# TAG: maximum_object_size (bytes) +# Set the default value for max-size parameter on any cache_dir. +# The value is specified in bytes, and the default is 4 MB. +# +# If you wish to get a high BYTES hit ratio, you should probably +# increase this (one 32 MB object hit counts for 3200 10KB +# hits). +# +# If you wish to increase hit ratio more than you want to +# save bandwidth you should leave this low. +# +# NOTE: if using the LFUDA replacement policy you should increase +# this value to maximize the byte hit rate improvement of LFUDA! +# See cache_replacement_policy for a discussion of this policy. +#Default: +# maximum_object_size 4 MB + +# TAG: cache_dir +# Format: +# cache_dir Type Directory-Name Fs-specific-data [options] +# +# You can specify multiple cache_dir lines to spread the +# cache among different disk partitions. +# +# Type specifies the kind of storage system to use. Only "ufs" +# is built by default. To enable any of the other storage systems +# see the --enable-storeio configure option. +# +# 'Directory' is a top-level directory where cache swap +# files will be stored. If you want to use an entire disk +# for caching, this can be the mount-point directory. +# The directory must exist and be writable by the Squid +# process. Squid will NOT create this directory for you. +# +# In SMP configurations, cache_dir must not precede the workers option +# and should use configuration macros or conditionals to give each +# worker interested in disk caching a dedicated cache directory. 
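+#
+#	For example, one way to give each worker its own directory is
+#	Squid's ${process_number} macro (a sketch assuming a two-worker
+#	setup; the worker count, path and sizes are placeholders):
+#
+#		workers 2
+#		cache_dir ufs /var/spool/squid/worker-${process_number} 100 16 256
+#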
+# +# +# ==== The ufs store type ==== +# +# "ufs" is the old well-known Squid storage format that has always +# been there. +# +# Usage: +# cache_dir ufs Directory-Name Mbytes L1 L2 [options] +# +# 'Mbytes' is the amount of disk space (MB) to use under this +# directory. The default is 100 MB. Change this to suit your +# configuration. Do NOT put the size of your disk drive here. +# Instead, if you want Squid to use the entire disk drive, +# subtract 20% and use that value. +# +# 'L1' is the number of first-level subdirectories which +# will be created under the 'Directory'. The default is 16. +# +# 'L2' is the number of second-level subdirectories which +# will be created under each first-level directory. The default +# is 256. +# +# +# ==== The aufs store type ==== +# +# "aufs" uses the same storage format as "ufs", utilizing +# POSIX-threads to avoid blocking the main Squid process on +# disk-I/O. This was formerly known in Squid as async-io. +# +# Usage: +# cache_dir aufs Directory-Name Mbytes L1 L2 [options] +# +# see argument descriptions under ufs above +# +# +# ==== The diskd store type ==== +# +# "diskd" uses the same storage format as "ufs", utilizing a +# separate process to avoid blocking the main Squid process on +# disk-I/O. +# +# Usage: +# cache_dir diskd Directory-Name Mbytes L1 L2 [options] [Q1=n] [Q2=n] +# +# see argument descriptions under ufs above +# +# Q1 specifies the number of unacknowledged I/O requests when Squid +# stops opening new files. If this many messages are in the queues, +# Squid won't open new files. Default is 64 +# +# Q2 specifies the number of unacknowledged messages when Squid +# starts blocking. If this many messages are in the queues, +# Squid blocks until it receives some replies. Default is 72 +# +# When Q1 < Q2 (the default), the cache directory is optimized +# for lower response time at the expense of a decrease in hit +# ratio. If Q1 > Q2, the cache directory is optimized for +# higher hit ratio at the expense of an increase in response +# time. +# +# +# ==== The rock store type ==== +# +# Usage: +# cache_dir rock Directory-Name Mbytes [options] +# +# The Rock Store type is a database-style storage. All cached +# entries are stored in a "database" file, using fixed-size slots. +# A single entry occupies one or more slots. +# +# If possible, Squid using Rock Store creates a dedicated kid +# process called "disker" to avoid blocking Squid worker(s) on disk +# I/O. One disker kid is created for each rock cache_dir. Diskers +# are created only when Squid, running in daemon mode, has support +# for the IpcIo disk I/O module. +# +# swap-timeout=msec: Squid will not start writing a miss to or +# reading a hit from disk if it estimates that the swap operation +# will take more than the specified number of milliseconds. By +# default and when set to zero, disables the disk I/O time limit +# enforcement. Ignored when using blocking I/O module because +# blocking synchronous I/O does not allow Squid to estimate the +# expected swap wait time. +# +# max-swap-rate=swaps/sec: Artificially limits disk access using +# the specified I/O rate limit. Swap out requests that +# would cause the average I/O rate to exceed the limit are +# delayed. Individual swap in requests (i.e., hits or reads) are +# not delayed, but they do contribute to measured swap rate and +# since they are placed in the same FIFO queue as swap out +# requests, they may wait longer if max-swap-rate is smaller. 
+# This is necessary on file systems that buffer "too +# many" writes and then start blocking Squid and other processes +# while committing those writes to disk. Usually used together +# with swap-timeout to avoid excessive delays and queue overflows +# when disk demand exceeds available disk "bandwidth". By default +# and when set to zero, disables the disk I/O rate limit +# enforcement. Currently supported by IpcIo module only. +# +# slot-size=bytes: The size of a database "record" used for +# storing cached responses. A cached response occupies at least +# one slot and all database I/O is done using individual slots so +# increasing this parameter leads to more disk space waste while +# decreasing it leads to more disk I/O overheads. Should be a +# multiple of your operating system I/O page size. Defaults to +# 16KBytes. A housekeeping header is stored with each slot and +# smaller slot-sizes will be rejected. The header is smaller than +# 100 bytes. +# +# +# ==== COMMON OPTIONS ==== +# +# no-store no new objects should be stored to this cache_dir. +# +# min-size=n the minimum object size in bytes this cache_dir +# will accept. It's used to restrict a cache_dir +# to only store large objects (e.g. AUFS) while +# other stores are optimized for smaller objects +# (e.g. Rock). +# Defaults to 0. +# +# max-size=n the maximum object size in bytes this cache_dir +# supports. +# The value in maximum_object_size directive sets +# the default unless more specific details are +# available (ie a small store capacity). +# +# Note: To make optimal use of the max-size limits you should order +# the cache_dir lines with the smallest max-size value first. +# +#Default: +# No disk cache. Store cache ojects only in memory. +# + +# Uncomment and adjust the following to add a disk cache directory. +#cache_dir ufs /var/spool/squid 100 16 256 + +# TAG: store_dir_select_algorithm +# How Squid selects which cache_dir to use when the response +# object will fit into more than one. +# +# Regardless of which algorithm is used the cache_dir min-size +# and max-size parameters are obeyed. As such they can affect +# the selection algorithm by limiting the set of considered +# cache_dir. +# +# Algorithms: +# +# least-load +# +# This algorithm is suited to caches with similar cache_dir +# sizes and disk speeds. +# +# The disk with the least I/O pending is selected. +# When there are multiple disks with the same I/O load ranking +# the cache_dir with most available capacity is selected. +# +# When a mix of cache_dir sizes are configured the faster disks +# have a naturally lower I/O loading and larger disks have more +# capacity. So space used to store objects and data throughput +# may be very unbalanced towards larger disks. +# +# +# round-robin +# +# This algorithm is suited to caches with unequal cache_dir +# disk sizes. +# +# Each cache_dir is selected in a rotation. The next suitable +# cache_dir is used. +# +# Available cache_dir capacity is only considered in relation +# to whether the object will fit and meets the min-size and +# max-size parameters. +# +# Disk I/O loading is only considered to prevent overload on slow +# disks. This algorithm does not spread objects by size, so any +# I/O loading per-disk may appear very unbalanced and volatile. +# +# If several cache_dirs use similar min-size, max-size, or other +# limits to to reject certain responses, then do not group such +# cache_dir lines together, to avoid round-robin selection bias +# towards the first cache_dir after the group. 
Instead, interleave +# cache_dir lines from different groups. For example: +# +# store_dir_select_algorithm round-robin +# cache_dir rock /hdd1 ... min-size=100000 +# cache_dir rock /ssd1 ... max-size=99999 +# cache_dir rock /hdd2 ... min-size=100000 +# cache_dir rock /ssd2 ... max-size=99999 +# cache_dir rock /hdd3 ... min-size=100000 +# cache_dir rock /ssd3 ... max-size=99999 +#Default: +# store_dir_select_algorithm least-load + +# TAG: max_open_disk_fds +# To avoid having disk as the I/O bottleneck Squid can optionally +# bypass the on-disk cache if more than this amount of disk file +# descriptors are open. +# +# A value of 0 indicates no limit. +#Default: +# no limit + +# TAG: cache_swap_low (percent, 0-100) +# The low-water mark for AUFS/UFS/diskd cache object eviction by +# the cache_replacement_policy algorithm. +# +# Removal begins when the swap (disk) usage of a cache_dir is +# above this low-water mark and attempts to maintain utilization +# near the low-water mark. +# +# As swap utilization increases towards the high-water mark set +# by cache_swap_high object eviction becomes more agressive. +# +# The value difference in percentages between low- and high-water +# marks represent an eviction rate of 300 objects per second and +# the rate continues to scale in agressiveness by multiples of +# this above the high-water mark. +# +# Defaults are 90% and 95%. If you have a large cache, 5% could be +# hundreds of MB. If this is the case you may wish to set these +# numbers closer together. +# +# See also cache_swap_high and cache_replacement_policy +#Default: +# cache_swap_low 90 + +# TAG: cache_swap_high (percent, 0-100) +# The high-water mark for AUFS/UFS/diskd cache object eviction by +# the cache_replacement_policy algorithm. +# +# Removal begins when the swap (disk) usage of a cache_dir is +# above the low-water mark set by cache_swap_low and attempts to +# maintain utilization near the low-water mark. +# +# As swap utilization increases towards this high-water mark object +# eviction becomes more agressive. +# +# The value difference in percentages between low- and high-water +# marks represent an eviction rate of 300 objects per second and +# the rate continues to scale in agressiveness by multiples of +# this above the high-water mark. +# +# Defaults are 90% and 95%. If you have a large cache, 5% could be +# hundreds of MB. If this is the case you may wish to set these +# numbers closer together. +# +# See also cache_swap_low and cache_replacement_policy +#Default: +# cache_swap_high 95 + +# LOGFILE OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: logformat +# Usage: +# +# logformat +# +# Defines an access log format. +# +# The is a string with embedded % format codes +# +# % format codes all follow the same basic structure where all but +# the formatcode is optional. Output strings are automatically escaped +# as required according to their context and the output format +# modifiers are usually not needed, but can be specified if an explicit +# output format is desired. +# +# % ["|[|'|#] [-] [[0]width] [{argument}] formatcode +# +# " output in quoted string format +# [ output in squid text log format as used by log_mime_hdrs +# # output in URL quoted format +# ' output as-is +# +# - left aligned +# +# width minimum and/or maximum field width: +# [width_min][.width_max] +# When minimum starts with 0, the field is zero-padded. +# String values exceeding maximum width are truncated. 
+# +# {arg} argument such as header name etc +# +# Format codes: +# +# % a literal % character +# sn Unique sequence number per log line entry +# err_code The ID of an error response served by Squid or +# a similar internal error identifier. +# err_detail Additional err_code-dependent error information. +# note The annotation specified by the argument. Also +# logs the adaptation meta headers set by the +# adaptation_meta configuration parameter. +# If no argument given all annotations logged. +# The argument may include a separator to use with +# annotation values: +# name[:separator] +# By default, multiple note values are separated with "," +# and multiple notes are separated with "\r\n". +# When logging named notes with %{name}note, the +# explicitly configured separator is used between note +# values. When logging all notes with %note, the +# explicitly configured separator is used between +# individual notes. There is currently no way to +# specify both value and notes separators when logging +# all notes with %note. +# +# Connection related format codes: +# +# >a Client source IP address +# >A Client FQDN +# >p Client source port +# >eui Client source EUI (MAC address, EUI-48 or EUI-64 identifier) +# >la Local IP address the client connected to +# >lp Local port number the client connected to +# >qos Client connection TOS/DSCP value set by Squid +# >nfmark Client connection netfilter mark set by Squid +# +# la Local listening IP address the client connection was connected to. +# lp Local listening port number the client connection was connected to. +# +# . format. +# Currently, Squid considers the master transaction +# started when a complete HTTP request header initiating +# the transaction is received from the client. This is +# the same value that Squid uses to calculate transaction +# response time when logging %tr to access.log. Currently, +# Squid uses millisecond resolution for %tS values, +# similar to the default access.log "current time" field +# (%ts.%03tu). +# +# Access Control related format codes: +# +# et Tag returned by external acl +# ea Log string returned by external acl +# un User name (any available) +# ul User name from authentication +# ue User name from external acl helper +# ui User name from ident +# un A user name. Expands to the first available name +# from the following list of information sources: +# - authenticated user name, like %ul +# - user name supplied by an external ACL, like %ue +# - SSL client name, like %us +# - ident user name, like %ui +# credentials Client credentials. The exact meaning depends on +# the authentication scheme: For Basic authentication, +# it is the password; for Digest, the realm sent by the +# client; for NTLM and Negotiate, the client challenge +# or client credentials prefixed with "YR " or "KK ". +# +# HTTP related format codes: +# +# REQUEST +# +# [http::]rm Request method (GET/POST etc) +# [http::]>rm Request method from client +# [http::]ru Request URL from client +# [http::]rs Request URL scheme from client +# [http::]rd Request URL domain from client +# [http::]rP Request URL port from client +# [http::]rp Request URL path excluding hostname from client +# [http::]rv Request protocol version from client +# [http::]h Original received request header. +# Usually differs from the request header sent by +# Squid, although most fields are often preserved. +# Accepts optional header field name/value filter +# argument using name[:[separator]element] format. 
+# [http::]>ha Received request header after adaptation and +# redirection (pre-cache REQMOD vectoring point). +# Usually differs from the request header sent by +# Squid, although most fields are often preserved. +# Optional header name argument as for >h +# +# +# RESPONSE +# +# [http::]Hs HTTP status code sent to the client +# +# [http::]h +# +# [http::]mt MIME content type +# +# +# SIZE COUNTERS +# +# [http::]st Total size of request + reply traffic with client +# [http::]>st Total size of request received from client. +# Excluding chunked encoding bytes. +# [http::]sh Size of request headers received from client +# [http::]sni SSL client SNI sent to Squid. Available only +# after the peek, stare, or splice SSL bumping +# actions. +# +# If ICAP is enabled, the following code becomes available (as +# well as ICAP log codes documented with the icap_log option): +# +# icap::tt Total ICAP processing time for the HTTP +# transaction. The timer ticks when ICAP +# ACLs are checked and when ICAP +# transaction is in progress. +# +# If adaptation is enabled the following three codes become available: +# +# adapt::cert_subject The Subject field of the received client +# SSL certificate or a dash ('-') if Squid has +# received an invalid/malformed certificate or +# no certificate at all. Consider encoding the +# logged value because Subject often has spaces. +# +# %ssl::>cert_issuer The Issuer field of the received client +# SSL certificate or a dash ('-') if Squid has +# received an invalid/malformed certificate or +# no certificate at all. Consider encoding the +# logged value because Issuer often has spaces. +# +# The default formats available (which do not need re-defining) are: +# +#logformat squid %ts.%03tu %6tr %>a %Ss/%03>Hs %a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %h" "%{User-Agent}>h" %Ss:%Sh +#logformat referrer %ts.%03tu %>a %{Referer}>h %ru +#logformat useragent %>a [%tl] "%{User-Agent}>h" +# +# NOTE: When the log_mime_hdrs directive is set to ON. +# The squid, common and combined formats have a safely encoded copy +# of the mime headers appended to each line within a pair of brackets. +# +# NOTE: The common and combined formats are not quite true to the Apache definition. +# The logs from Squid contain an extra status and hierarchy code appended. +# +#Default: +# The format definitions squid, common, combined, referrer, useragent are built in. + +# TAG: access_log +# Configures whether and how Squid logs HTTP and ICP transactions. +# If access logging is enabled, a single line is logged for every +# matching HTTP or ICP request. The recommended directive formats are: +# +# access_log : [option ...] [acl acl ...] +# access_log none [acl acl ...] +# +# The following directive format is accepted but may be deprecated: +# access_log : [ [acl acl ...]] +# +# In most cases, the first ACL name must not contain the '=' character +# and should not be equal to an existing logformat name. You can always +# start with an 'all' ACL to work around those restrictions. +# +# Will log to the specified module:place using the specified format (which +# must be defined in a logformat directive) those entries which match +# ALL the acl's specified (which must be defined in acl clauses). +# If no acl is specified, all requests will be logged to this destination. +# +# ===== Available options for the recommended directive format ===== +# +# logformat=name Names log line format (either built-in or +# defined by a logformat directive). Defaults +# to 'squid'. 
+# +# buffer-size=64KB Defines approximate buffering limit for log +# records (see buffered_logs). Squid should not +# keep more than the specified size and, hence, +# should flush records before the buffer becomes +# full to avoid overflows under normal +# conditions (the exact flushing algorithm is +# module-dependent though). The on-error option +# controls overflow handling. +# +# on-error=die|drop Defines action on unrecoverable errors. The +# 'drop' action ignores (i.e., does not log) +# affected log records. The default 'die' action +# kills the affected worker. The drop action +# support has not been tested for modules other +# than tcp. +# +# ===== Modules Currently available ===== +# +# none Do not log any requests matching these ACL. +# Do not specify Place or logformat name. +# +# stdio Write each log line to disk immediately at the completion of +# each request. +# Place: the filename and path to be written. +# +# daemon Very similar to stdio. But instead of writing to disk the log +# line is passed to a daemon helper for asychronous handling instead. +# Place: varies depending on the daemon. +# +# log_file_daemon Place: the file name and path to be written. +# +# syslog To log each request via syslog facility. +# Place: The syslog facility and priority level for these entries. +# Place Format: facility.priority +# +# where facility could be any of: +# authpriv, daemon, local0 ... local7 or user. +# +# And priority could be any of: +# err, warning, notice, info, debug. +# +# udp To send each log line as text data to a UDP receiver. +# Place: The destination host name or IP and port. +# Place Format: //host:port +# +# tcp To send each log line as text data to a TCP receiver. +# Lines may be accumulated before sending (see buffered_logs). +# Place: The destination host name or IP and port. +# Place Format: //host:port +# +# Default: +# access_log daemon:/var/log/squid/access.log squid +#Default: +# access_log daemon:/var/log/squid/access.log squid + +# TAG: icap_log +# ICAP log files record ICAP transaction summaries, one line per +# transaction. +# +# The icap_log option format is: +# icap_log [ [acl acl ...]] +# icap_log none [acl acl ...]] +# +# Please see access_log option documentation for details. The two +# kinds of logs share the overall configuration approach and many +# features. +# +# ICAP processing of a single HTTP message or transaction may +# require multiple ICAP transactions. In such cases, multiple +# ICAP transaction log lines will correspond to a single access +# log line. +# +# ICAP log supports many access.log logformat %codes. In ICAP context, +# HTTP message-related %codes are applied to the HTTP message embedded +# in an ICAP message. Logformat "%http::>..." codes are used for HTTP +# messages embedded in ICAP requests while "%http::<..." codes are used +# for HTTP messages embedded in ICAP responses. For example: +# +# http::>h To-be-adapted HTTP message headers sent by Squid to +# the ICAP service. For REQMOD transactions, these are +# HTTP request headers. For RESPMOD, these are HTTP +# response headers, but Squid currently cannot log them +# (i.e., %http::>h will expand to "-" for RESPMOD). +# +# http::st The total size of the ICAP request sent to the ICAP +# server (ICAP headers + ICAP body), including chunking +# metadata (if any). +# +# icap::h ICAP request header(s). Similar to >h. 
+# +# icap::A %icap::to/%03icap::Hs %icap::\n - logfile data +# R\n - rotate file +# T\n - truncate file +# O\n - reopen file +# F\n - flush file +# r\n - set rotate count to +# b\n - 1 = buffer output, 0 = don't buffer output +# +# No responses is expected. +#Default: +# logfile_daemon /usr/lib/squid/log_file_daemon + +# TAG: stats_collection allow|deny acl acl... +# This options allows you to control which requests gets accounted +# in performance counters. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# Allow logging for all transactions. + +# TAG: cache_store_log +# Logs the activities of the storage manager. Shows which +# objects are ejected from the cache, and which objects are +# saved and for how long. +# There are not really utilities to analyze this data, so you can safely +# disable it (the default). +# +# Store log uses modular logging outputs. See access_log for the list +# of modules supported. +# +# Example: +# cache_store_log stdio:/var/log/squid/store.log +# cache_store_log daemon:/var/log/squid/store.log +#Default: +# none + +# TAG: cache_swap_state +# Location for the cache "swap.state" file. This index file holds +# the metadata of objects saved on disk. It is used to rebuild +# the cache during startup. Normally this file resides in each +# 'cache_dir' directory, but you may specify an alternate +# pathname here. Note you must give a full filename, not just +# a directory. Since this is the index for the whole object +# list you CANNOT periodically rotate it! +# +# If %s can be used in the file name it will be replaced with a +# a representation of the cache_dir name where each / is replaced +# with '.'. This is needed to allow adding/removing cache_dir +# lines when cache_swap_log is being used. +# +# If have more than one 'cache_dir', and %s is not used in the name +# these swap logs will have names such as: +# +# cache_swap_log.00 +# cache_swap_log.01 +# cache_swap_log.02 +# +# The numbered extension (which is added automatically) +# corresponds to the order of the 'cache_dir' lines in this +# configuration file. If you change the order of the 'cache_dir' +# lines in this file, these index files will NOT correspond to +# the correct 'cache_dir' entry (unless you manually rename +# them). We recommend you do NOT use this option. It is +# better to keep these index files in each 'cache_dir' directory. +#Default: +# Store the journal inside its cache_dir + +# TAG: logfile_rotate +# Specifies the number of logfile rotations to make when you +# type 'squid -k rotate'. The default is 10, which will rotate +# with extensions 0 through 9. Setting logfile_rotate to 0 will +# disable the file name rotation, but the logfiles are still closed +# and re-opened. This will enable you to rename the logfiles +# yourself just before sending the rotate signal. +# +# Note, the 'squid -k rotate' command normally sends a USR1 +# signal to the running squid process. In certain situations +# (e.g. on Linux with Async I/O), USR1 is used for other +# purposes, so -k rotate uses another signal. It is best to get +# in the habit of using 'squid -k rotate' instead of 'kill -USR1 +# '. +# +# Note, from Squid-3.1 this option is only a default for cache.log, +# that log can be rotated separately by using debug_options. +# +# Note2, for Debian/Linux the default of logfile_rotate is +# zero, since it includes external logfile-rotation methods. 
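+#
+#	Example (illustrative; only useful if you do not rely on an
+#	external rotation method such as logrotate):
+#		logfile_rotate 10
+#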
+#Default: +# logfile_rotate 0 + +# TAG: mime_table +# Path to Squid's icon configuration file. +# +# You shouldn't need to change this, but the default file contains +# examples and formatting information if you do. +#Default: +# mime_table /usr/share/squid/mime.conf + +# TAG: log_mime_hdrs on|off +# The Cache can record both the request and the response MIME +# headers for each HTTP transaction. The headers are encoded +# safely and will appear as two bracketed fields at the end of +# the access log (for either the native or httpd-emulated log +# formats). To enable this logging set log_mime_hdrs to 'on'. +#Default: +# log_mime_hdrs off + +# TAG: pid_filename +# A filename to write the process-id to. To disable, enter "none". +#Default: +# pid_filename /var/run/squid.pid + +# TAG: client_netmask +# A netmask for client addresses in logfiles and cachemgr output. +# Change this to protect the privacy of your cache clients. +# A netmask of 255.255.255.0 will log all IP's in that range with +# the last digit set to '0'. +#Default: +# Log full client IP address + +# TAG: strip_query_terms +# By default, Squid strips query terms from requested URLs before +# logging. This protects your user's privacy and reduces log size. +# +# When investigating HIT/MISS or other caching behaviour you +# will need to disable this to see the full URL used by Squid. +#Default: +# strip_query_terms on + +# TAG: buffered_logs on|off +# Whether to write/send access_log records ASAP or accumulate them and +# then write/send them in larger chunks. Buffering may improve +# performance because it decreases the number of I/Os. However, +# buffering increases the delay before log records become available to +# the final recipient (e.g., a disk file or logging daemon) and, +# hence, increases the risk of log records loss. +# +# Note that even when buffered_logs are off, Squid may have to buffer +# records if it cannot write/send them immediately due to pending I/Os +# (e.g., the I/O writing the previous log record) or connectivity loss. +# +# Currently honored by 'daemon' and 'tcp' access_log modules only. +#Default: +# buffered_logs off + +# TAG: netdb_filename +# Where Squid stores it's netdb journal. +# When enabled this journal preserves netdb state between restarts. +# +# To disable, enter "none". +#Default: +# netdb_filename stdio:/var/log/squid/netdb.state + +# OPTIONS FOR TROUBLESHOOTING +# ----------------------------------------------------------------------------- + +# TAG: cache_log +# Squid administrative logging file. +# +# This is where general information about Squid behavior goes. You can +# increase the amount of data logged to this file and how often it is +# rotated with "debug_options" +#Default: +# cache_log /var/log/squid/cache.log + +# TAG: debug_options +# Logging options are set as section,level where each source file +# is assigned a unique section. Lower levels result in less +# output, Full debugging (level 9) can result in a very large +# log file, so be careful. +# +# The magic word "ALL" sets debugging levels for all sections. +# The default is to run with "ALL,1" to record important warnings. +# +# The rotate=N option can be used to keep more or less of these logs +# than would otherwise be kept by logfile_rotate. +# For most uses a single log should be enough to monitor current +# events affecting Squid. +#Default: +# Log all critical and important messages. + +# TAG: coredump_dir +# By default Squid leaves core files in the directory from where +# it was started. 
If you set 'coredump_dir' to a directory +# that exists, Squid will chdir() to that directory at startup +# and coredump files will be left there. +# +#Default: +# Use the directory from where Squid was started. +# + +# Leave coredumps in the first cache dir +coredump_dir /var/spool/squid + +# OPTIONS FOR FTP GATEWAYING +# ----------------------------------------------------------------------------- + +# TAG: ftp_user +# If you want the anonymous login password to be more informative +# (and enable the use of picky FTP servers), set this to something +# reasonable for your domain, like wwwuser@somewhere.net +# +# The reason why this is domainless by default is the +# request can be made on the behalf of a user in any domain, +# depending on how the cache is used. +# Some FTP server also validate the email address is valid +# (for example perl.com). +#Default: +# ftp_user Squid@ + +# TAG: ftp_passive +# If your firewall does not allow Squid to use passive +# connections, turn off this option. +# +# Use of ftp_epsv_all option requires this to be ON. +#Default: +# ftp_passive on + +# TAG: ftp_epsv_all +# FTP Protocol extensions permit the use of a special "EPSV ALL" command. +# +# NATs may be able to put the connection on a "fast path" through the +# translator, as the EPRT command will never be used and therefore, +# translation of the data portion of the segments will never be needed. +# +# When a client only expects to do two-way FTP transfers this may be +# useful. +# If squid finds that it must do a three-way FTP transfer after issuing +# an EPSV ALL command, the FTP session will fail. +# +# If you have any doubts about this option do not use it. +# Squid will nicely attempt all other connection methods. +# +# Requires ftp_passive to be ON (default) for any effect. +#Default: +# ftp_epsv_all off + +# TAG: ftp_epsv +# FTP Protocol extensions permit the use of a special "EPSV" command. +# +# NATs may be able to put the connection on a "fast path" through the +# translator using EPSV, as the EPRT command will never be used +# and therefore, translation of the data portion of the segments +# will never be needed. +# +# EPSV is often required to interoperate with FTP servers on IPv6 +# networks. On the other hand, it may break some IPv4 servers. +# +# By default, EPSV may try EPSV with any FTP server. To fine tune +# that decision, you may restrict EPSV to certain clients or servers +# using ACLs: +# +# ftp_epsv allow|deny al1 acl2 ... +# +# WARNING: Disabling EPSV may cause problems with external NAT and IPv6. +# +# Only fast ACLs are supported. +# Requires ftp_passive to be ON (default) for any effect. +#Default: +# none + +# TAG: ftp_eprt +# FTP Protocol extensions permit the use of a special "EPRT" command. +# +# This extension provides a protocol neutral alternative to the +# IPv4-only PORT command. When supported it enables active FTP data +# channels over IPv6 and efficient NAT handling. +# +# Turning this OFF will prevent EPRT being attempted and will skip +# straight to using PORT for IPv4 servers. +# +# Some devices are known to not handle this extension correctly and +# may result in crashes. Devices which suport EPRT enough to fail +# cleanly will result in Squid attempting PORT anyway. This directive +# should only be disabled when EPRT results in device failures. +# +# WARNING: Doing so will convert Squid back to the old behavior with all +# the related problems with external NAT devices/layers and IPv4-only FTP. 
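+#
+#	Example (illustrative; disable only when a broken device makes
+#	EPRT fail):
+#		ftp_eprt off
+#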
+#Default: +# ftp_eprt on + +# TAG: ftp_sanitycheck +# For security and data integrity reasons Squid by default performs +# sanity checks of the addresses of FTP data connections ensure the +# data connection is to the requested server. If you need to allow +# FTP connections to servers using another IP address for the data +# connection turn this off. +#Default: +# ftp_sanitycheck on + +# TAG: ftp_telnet_protocol +# The FTP protocol is officially defined to use the telnet protocol +# as transport channel for the control connection. However, many +# implementations are broken and does not respect this aspect of +# the FTP protocol. +# +# If you have trouble accessing files with ASCII code 255 in the +# path or similar problems involving this ASCII code you can +# try setting this directive to off. If that helps, report to the +# operator of the FTP server in question that their FTP server +# is broken and does not follow the FTP standard. +#Default: +# ftp_telnet_protocol on + +# OPTIONS FOR EXTERNAL SUPPORT PROGRAMS +# ----------------------------------------------------------------------------- + +# TAG: diskd_program +# Specify the location of the diskd executable. +# Note this is only useful if you have compiled in +# diskd as one of the store io modules. +#Default: +# diskd_program /usr/lib/squid/diskd + +# TAG: unlinkd_program +# Specify the location of the executable for file deletion process. +#Default: +# unlinkd_program /usr/lib/squid/unlinkd + +# TAG: pinger_program +# Specify the location of the executable for the pinger process. +#Default: +# pinger_program /usr/lib/squid/pinger + +# TAG: pinger_enable +# Control whether the pinger is active at run-time. +# Enables turning ICMP pinger on and off with a simple +# squid -k reconfigure. +#Default: +# pinger_enable on + +# OPTIONS FOR URL REWRITING +# ----------------------------------------------------------------------------- + +# TAG: url_rewrite_program +# Specify the location of the executable URL rewriter to use. +# Since they can perform almost any function there isn't one included. +# +# For each requested URL, the rewriter will receive on line with the format +# +# [channel-ID ] URL [ extras] +# +# See url_rewrite_extras on how to send "extras" with optional values to +# the helper. +# After processing the request the helper must reply using the following format: +# +# [channel-ID ] result [ kv-pairs] +# +# The result code can be: +# +# OK status=30N url="..." +# Redirect the URL to the one supplied in 'url='. +# 'status=' is optional and contains the status code to send +# the client in Squids HTTP response. It must be one of the +# HTTP redirect status codes: 301, 302, 303, 307, 308. +# When no status is given Squid will use 302. +# +# OK rewrite-url="..." +# Rewrite the URL to the one supplied in 'rewrite-url='. +# The new URL is fetched directly by Squid and returned to +# the client as the response to its request. +# +# OK +# When neither of url= and rewrite-url= are sent Squid does +# not change the URL. +# +# ERR +# Do not change the URL. +# +# BH +# An internal error occurred in the helper, preventing +# a result being identified. The 'message=' key name is +# reserved for delivering a log message. +# +# +# In addition to the above kv-pairs Squid also understands the following +# optional kv-pairs received from URL rewriters: +# clt_conn_tag=TAG +# Associates a TAG with the client TCP connection. 
+# The TAG is treated as a regular annotation but persists across
+# future requests on the client connection rather than just the
+# current request. A helper may update the TAG during subsequent
+# requests by returning a new kv-pair.
+#
+# When using the concurrency= option the protocol is changed by
+# introducing a query channel tag in front of the request/response.
+# The query channel tag is a number between 0 and concurrency-1.
+# This value must be echoed back unchanged to Squid as the first part
+# of the response relating to its request.
+#
+# WARNING: URL re-writing ability should be avoided whenever possible.
+# Use the URL redirect form of response instead.
+#
+# Re-writing creates a difference in the state held by the client
+# and server, possibly causing confusion when the server response
+# contains snippets of its view state. Embedded URLs, response
+# and content Location headers, etc. are not re-written by this
+# interface.
+#
+# By default, a URL rewriter is not used.
+#Default:
+# none
+
+# TAG: url_rewrite_children
+# The maximum number of redirector processes to spawn. If you limit
+# it to too few, Squid will have to wait for them to process a backlog
+# of URLs, slowing it down. If you allow too many they will use RAM
+# and other system resources noticeably.
+#
+# The startup= and idle= options allow some measure of skew in your
+# tuning.
+#
+# startup=
+#
+# Sets a minimum of how many processes are to be spawned when Squid
+# starts or reconfigures. When set to zero the first request will
+# cause spawning of the first child process to handle it.
+#
+# Starting too few will cause an initial slowdown in traffic as Squid
+# attempts to simultaneously spawn enough processes to cope.
+#
+# idle=
+#
+# Sets a minimum of how many processes Squid is to try and keep available
+# at all times. When traffic begins to rise above what the existing
+# processes can handle this many more will be spawned up to the maximum
+# configured. A minimum setting of 1 is required.
+#
+# concurrency=
+#
+# The number of requests each redirector helper can handle in
+# parallel. Defaults to 0 which indicates the redirector
+# is an old-style single-threaded redirector.
+#
+# When this directive is set to a value >= 1 then the protocol
+# used to communicate with the helper is modified to include
+# an ID in front of the request/response. The ID from the request
+# must be echoed back with the response to that request.
+#Default:
+# url_rewrite_children 20 startup=0 idle=1 concurrency=0
+
+# TAG: url_rewrite_host_header
+# To preserve same-origin security policies in browsers and
+# prevent Host: header forgery by redirectors, Squid rewrites
+# any Host: header in redirected requests.
+#
+# If you are running an accelerator this may not be a desired
+# effect of a redirector. This directive enables you to disable
+# Host: alteration in reverse-proxy traffic.
+#
+# WARNING: Entries are cached on the result of the URL rewriting
+# process, so be careful if you have domain-virtual hosts.
+#
+# WARNING: Squid and other software verify that the URL and Host
+# header match, so be careful not to relay through other proxies
+# or inspecting firewalls with this disabled.
+#Default:
+# url_rewrite_host_header on
+
+# TAG: url_rewrite_access
+# If defined, this access list specifies which requests are
+# sent to the redirector processes.
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Allow, unless rules exist in squid.conf.
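+#
+# A minimal sketch wiring the URL rewriting directives above together
+# (the helper path, ACL name and domain are assumptions for illustration,
+# not defaults):
+#
+# acl rewrite_domains dstdomain .shop.example.com
+# url_rewrite_program /usr/local/bin/my_rewriter
+# url_rewrite_children 20 startup=1 idle=1 concurrency=10
+# url_rewrite_access allow rewrite_domains
+# url_rewrite_access deny all
+#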
+
+# TAG: url_rewrite_bypass
+# When this is 'on', a request will not go through the
+# redirector if all the helpers are busy. If this is 'off'
+# and the redirector queue grows too large, Squid will exit
+# with a FATAL error and ask you to increase the number of
+# redirectors. You should only enable this if the redirectors
+# are not critical to your caching system. If you use
+# redirectors for access control, and you enable this option,
+# users may have access to pages they should not
+# be allowed to request.
+#Default:
+# url_rewrite_bypass off
+
+# TAG: url_rewrite_extras
+# Specifies a string to be appended to the request line format for
+# the rewriter helper. "Quoted" format values may contain spaces and
+# logformat %macros. In theory, any logformat %macro can be used.
+# In practice, a %macro expands as a dash (-) if the helper request is
+# sent before the required macro information is available to Squid.
+#Default:
+# url_rewrite_extras "%>a/%>A %un %>rm myip=%la myport=%lp"
+
+# OPTIONS FOR STORE ID
+# -----------------------------------------------------------------------------
+
+# TAG: store_id_program
+# Specify the location of the executable StoreID helper to use.
+# Since they can perform almost any function, there isn't one included.
+#
+# For each requested URL, the helper will receive one line with the format
+#
+# [channel-ID ] URL [ extras]
+#
+#
+# After processing the request the helper must reply using the following format:
+#
+# [channel-ID ] result [ kv-pairs]
+#
+# The result code can be:
+#
+# OK store-id="..."
+# Use the StoreID supplied in 'store-id='.
+#
+# ERR
+# The default is to use the HTTP request URL as the store ID.
+#
+# BH
+# An internal error occurred in the helper, preventing
+# a result being identified.
+#
+# In addition to the above kv-pairs Squid also understands the following
+# optional kv-pairs received from URL rewriters:
+# clt_conn_tag=TAG
+# Associates a TAG with the client TCP connection.
+# Please see the url_rewrite_program related documentation for this
+# kv-pair.
+#
+# Helper programs should be prepared to receive and possibly ignore
+# additional whitespace-separated tokens on each input line.
+#
+# When using the concurrency= option the protocol is changed by
+# introducing a query channel tag in front of the request/response.
+# The query channel tag is a number between 0 and concurrency-1.
+# This value must be echoed back unchanged to Squid as the first part
+# of the response relating to its request.
+#
+# NOTE: when using StoreID refresh_pattern will apply to the StoreID
+# returned from the helper and not the URL.
+#
+# WARNING: Wrong StoreID value returned by a careless helper may result
+# in the wrong cached response returned to the user.
+#
+# By default, a StoreID helper is not used.
+#Default:
+# none
+
+# TAG: store_id_extras
+# Specifies a string to be appended to the request line format for
+# the StoreId helper. "Quoted" format values may contain spaces and
+# logformat %macros. In theory, any logformat %macro can be used.
+# In practice, a %macro expands as a dash (-) if the helper request is
+# sent before the required macro information is available to Squid.
+#Default:
+# store_id_extras "%>a/%>A %un %>rm myip=%la myport=%lp"
+
+# TAG: store_id_children
+# The maximum number of StoreID helper processes to spawn. If you limit
+# it to too few, Squid will have to wait for them to process a backlog
+# of requests, slowing it down. If you allow too many they will use RAM
+# and other system resources noticeably.
+# +# The startup= and idle= options allow some measure of skew in your +# tuning. +# +# startup= +# +# Sets a minimum of how many processes are to be spawned when Squid +# starts or reconfigures. When set to zero the first request will +# cause spawning of the first child process to handle it. +# +# Starting too few will cause an initial slowdown in traffic as Squid +# attempts to simultaneously spawn enough processes to cope. +# +# idle= +# +# Sets a minimum of how many processes Squid is to try and keep available +# at all times. When traffic begins to rise above what the existing +# processes can handle this many more will be spawned up to the maximum +# configured. A minimum setting of 1 is required. +# +# concurrency= +# +# The number of requests each storeID helper can handle in +# parallel. Defaults to 0 which indicates the helper +# is a old-style single threaded program. +# +# When this directive is set to a value >= 1 then the protocol +# used to communicate with the helper is modified to include +# an ID in front of the request/response. The ID from the request +# must be echoed back with the response to that request. +#Default: +# store_id_children 20 startup=0 idle=1 concurrency=0 + +# TAG: store_id_access +# If defined, this access list specifies which requests are +# sent to the StoreID processes. By default all requests +# are sent. +# +# This clause supports both fast and slow acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# Allow, unless rules exist in squid.conf. + +# TAG: store_id_bypass +# When this is 'on', a request will not go through the +# helper if all helpers are busy. If this is 'off' +# and the helper queue grows too large, Squid will exit +# with a FATAL error and ask you to increase the number of +# helpers. You should only enable this if the helperss +# are not critical to your caching system. If you use +# helpers for critical caching components, and you enable this +# option, users may not get objects from cache. +#Default: +# store_id_bypass on + +# OPTIONS FOR TUNING THE CACHE +# ----------------------------------------------------------------------------- + +# TAG: cache +# Requests denied by this directive will not be served from the cache +# and their responses will not be stored in the cache. This directive +# has no effect on other transactions and on already cached responses. +# +# This clause supports both fast and slow acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +# This and the two other similar caching directives listed below are +# checked at different transaction processing stages, have different +# access to response information, affect different cache operations, +# and differ in slow ACLs support: +# +# * cache: Checked before Squid makes a hit/miss determination. +# No access to reply information! +# Denies both serving a hit and storing a miss. +# Supports both fast and slow ACLs. +# * send_hit: Checked after a hit was detected. +# Has access to reply (hit) information. +# Denies serving a hit only. +# Supports fast ACLs only. +# * store_miss: Checked before storing a cachable miss. +# Has access to reply (miss) information. +# Denies storing a miss only. +# Supports fast ACLs only. +# +# If you are not sure which of the three directives to use, apply the +# following decision logic: +# +# * If your ACL(s) are of slow type _and_ need response info, redesign. +# Squid does not support that particular combination at this time. 
+# Otherwise: +# * If your directive ACL(s) are of slow type, use "cache"; and/or +# * if your directive ACL(s) need no response info, use "cache". +# Otherwise: +# * If you do not want the response cached, use store_miss; and/or +# * if you do not want a hit on a cached response, use send_hit. +#Default: +# By default, this directive is unused and has no effect. + +# TAG: send_hit +# Responses denied by this directive will not be served from the cache +# (but may still be cached, see store_miss). This directive has no +# effect on the responses it allows and on the cached objects. +# +# Please see the "cache" directive for a summary of differences among +# store_miss, send_hit, and cache directives. +# +# Unlike the "cache" directive, send_hit only supports fast acl +# types. See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +# For example: +# +# # apply custom Store ID mapping to some URLs +# acl MapMe dstdomain .c.example.com +# store_id_program ... +# store_id_access allow MapMe +# +# # but prevent caching of special responses +# # such as 302 redirects that cause StoreID loops +# acl Ordinary http_status 200-299 +# store_miss deny MapMe !Ordinary +# +# # and do not serve any previously stored special responses +# # from the cache (in case they were already cached before +# # the above store_miss rule was in effect). +# send_hit deny MapMe !Ordinary +#Default: +# By default, this directive is unused and has no effect. + +# TAG: store_miss +# Responses denied by this directive will not be cached (but may still +# be served from the cache, see send_hit). This directive has no +# effect on the responses it allows and on the already cached responses. +# +# Please see the "cache" directive for a summary of differences among +# store_miss, send_hit, and cache directives. See the +# send_hit directive for a usage example. +# +# Unlike the "cache" directive, store_miss only supports fast acl +# types. See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# By default, this directive is unused and has no effect. + +# TAG: max_stale time-units +# This option puts an upper limit on how stale content Squid +# will serve from the cache if cache validation fails. +# Can be overriden by the refresh_pattern max-stale option. +#Default: +# max_stale 1 week + +# TAG: refresh_pattern +# usage: refresh_pattern [-i] regex min percent max [options] +# +# By default, regular expressions are CASE-SENSITIVE. To make +# them case-insensitive, use the -i option. +# +# 'Min' is the time (in minutes) an object without an explicit +# expiry time should be considered fresh. The recommended +# value is 0, any higher values may cause dynamic applications +# to be erroneously cached unless the application designer +# has taken the appropriate actions. +# +# 'Percent' is a percentage of the objects age (time since last +# modification age) an object without explicit expiry time +# will be considered fresh. +# +# 'Max' is an upper limit on how long objects without an explicit +# expiry time will be considered fresh. The value is also used +# to form Cache-Control: max-age header for a request sent from +# Squid to origin/parent. 
+# +# options: override-expire +# override-lastmod +# reload-into-ims +# ignore-reload +# ignore-no-store +# ignore-must-revalidate +# ignore-private +# ignore-auth +# max-stale=NN +# refresh-ims +# store-stale +# +# override-expire enforces min age even if the server +# sent an explicit expiry time (e.g., with the +# Expires: header or Cache-Control: max-age). Doing this +# VIOLATES the HTTP standard. Enabling this feature +# could make you liable for problems which it causes. +# +# Note: override-expire does not enforce staleness - it only extends +# freshness / min. If the server returns a Expires time which +# is longer than your max time, Squid will still consider +# the object fresh for that period of time. +# +# override-lastmod enforces min age even on objects +# that were modified recently. +# +# reload-into-ims changes a client no-cache or ``reload'' +# request for a cached entry into a conditional request using +# If-Modified-Since and/or If-None-Match headers, provided the +# cached entry has a Last-Modified and/or a strong ETag header. +# Doing this VIOLATES the HTTP standard. Enabling this feature +# could make you liable for problems which it causes. +# +# ignore-reload ignores a client no-cache or ``reload'' +# header. Doing this VIOLATES the HTTP standard. Enabling +# this feature could make you liable for problems which +# it causes. +# +# ignore-no-store ignores any ``Cache-control: no-store'' +# headers received from a server. Doing this VIOLATES +# the HTTP standard. Enabling this feature could make you +# liable for problems which it causes. +# +# ignore-must-revalidate ignores any ``Cache-Control: must-revalidate`` +# headers received from a server. Doing this VIOLATES +# the HTTP standard. Enabling this feature could make you +# liable for problems which it causes. +# +# ignore-private ignores any ``Cache-control: private'' +# headers received from a server. Doing this VIOLATES +# the HTTP standard. Enabling this feature could make you +# liable for problems which it causes. +# +# ignore-auth caches responses to requests with authorization, +# as if the originserver had sent ``Cache-control: public'' +# in the response header. Doing this VIOLATES the HTTP standard. +# Enabling this feature could make you liable for problems which +# it causes. +# +# refresh-ims causes squid to contact the origin server +# when a client issues an If-Modified-Since request. This +# ensures that the client will receive an updated version +# if one is available. +# +# store-stale stores responses even if they don't have explicit +# freshness or a validator (i.e., Last-Modified or an ETag) +# present, or if they're already stale. By default, Squid will +# not cache such responses because they usually can't be +# reused. Note that such responses will be stale by default. +# +# max-stale=NN provide a maximum staleness factor. Squid won't +# serve objects more stale than this even if it failed to +# validate the object. Default: use the max_stale global limit. +# +# Basically a cached object is: +# +# FRESH if expire > now, else STALE +# STALE if age > max +# FRESH if lm-factor < percent, else STALE +# FRESH if age < min +# else STALE +# +# The refresh_pattern lines are checked in the order listed here. +# The first entry which matches is used. If none of the entries +# match the default will be used. +# +# Note, you must uncomment all the default lines if you want +# to change one. The default setting is only active if none is +# used. 
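+#
+# A worked example of the rules above, using the catch-all entry
+# "refresh_pattern . 0 20% 4320" configured below (the object timings
+# are assumptions for illustration): an object fetched 60 minutes ago,
+# last modified 600 minutes before it was fetched, with no Expires
+# header, has lm-factor = 60/600 = 10%. That is below the 20% threshold
+# and its age is below the 4320 minute maximum, so it is still FRESH;
+# once its age exceeds 4320 minutes it is STALE regardless of lm-factor.
+#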
+# +# + +# +# Add any of your own refresh_pattern entries above these. +# +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern ^gopher: 1440 0% 1440 +refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 +refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880 +# example lin deb packages +#refresh_pattern (\.deb|\.udeb)$ 129600 100% 129600 +refresh_pattern . 0 20% 4320 + +# TAG: quick_abort_min (KB) +#Default: +# quick_abort_min 16 KB + +# TAG: quick_abort_max (KB) +#Default: +# quick_abort_max 16 KB + +# TAG: quick_abort_pct (percent) +# The cache by default continues downloading aborted requests +# which are almost completed (less than 16 KB remaining). This +# may be undesirable on slow (e.g. SLIP) links and/or very busy +# caches. Impatient users may tie up file descriptors and +# bandwidth by repeatedly requesting and immediately aborting +# downloads. +# +# When the user aborts a request, Squid will check the +# quick_abort values to the amount of data transferred until +# then. +# +# If the transfer has less than 'quick_abort_min' KB remaining, +# it will finish the retrieval. +# +# If the transfer has more than 'quick_abort_max' KB remaining, +# it will abort the retrieval. +# +# If more than 'quick_abort_pct' of the transfer has completed, +# it will finish the retrieval. +# +# If you do not want any retrieval to continue after the client +# has aborted, set both 'quick_abort_min' and 'quick_abort_max' +# to '0 KB'. +# +# If you want retrievals to always continue if they are being +# cached set 'quick_abort_min' to '-1 KB'. +#Default: +# quick_abort_pct 95 + +# TAG: read_ahead_gap buffer-size +# The amount of data the cache will buffer ahead of what has been +# sent to the client when retrieving an object from another server. +#Default: +# read_ahead_gap 16 KB + +# TAG: negative_ttl time-units +# Set the Default Time-to-Live (TTL) for failed requests. +# Certain types of failures (such as "connection refused" and +# "404 Not Found") are able to be negatively-cached for a short time. +# Modern web servers should provide Expires: header, however if they +# do not this can provide a minimum TTL. +# The default is not to cache errors with unknown expiry details. +# +# Note that this is different from negative caching of DNS lookups. +# +# WARNING: Doing this VIOLATES the HTTP standard. Enabling +# this feature could make you liable for problems which it +# causes. +#Default: +# negative_ttl 0 seconds + +# TAG: positive_dns_ttl time-units +# Upper limit on how long Squid will cache positive DNS responses. +# Default is 6 hours (360 minutes). This directive must be set +# larger than negative_dns_ttl. +#Default: +# positive_dns_ttl 6 hours + +# TAG: negative_dns_ttl time-units +# Time-to-Live (TTL) for negative caching of failed DNS lookups. +# This also sets the lower cache limit on positive lookups. +# Minimum value is 1 second, and it is not recommendable to go +# much below 10 seconds. +#Default: +# negative_dns_ttl 1 minutes + +# TAG: range_offset_limit size [acl acl...] +# usage: (size) [units] [[!]aclname] +# +# Sets an upper limit on how far (number of bytes) into the file +# a Range request may be to cause Squid to prefetch the whole file. +# If beyond this limit, Squid forwards the Range request as it is and +# the result is NOT cached. +# +# This is to stop a far ahead range request (lets say start at 17MB) +# from making Squid fetch the whole object up to that point before +# sending anything to the client. 
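+#
+# For illustration (the ACL, domain and size are assumptions, not
+# defaults): allow full prefetch of ranged requests only for one media
+# domain, and never for anything else:
+#
+# acl media_servers dstdomain .media.example.com
+# range_offset_limit 10 MB media_servers
+# range_offset_limit 0
+#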
+# +# Multiple range_offset_limit lines may be specified, and they will +# be searched from top to bottom on each request until a match is found. +# The first match found will be used. If no line matches a request, the +# default limit of 0 bytes will be used. +# +# 'size' is the limit specified as a number of units. +# +# 'units' specifies whether to use bytes, KB, MB, etc. +# If no units are specified bytes are assumed. +# +# A size of 0 causes Squid to never fetch more than the +# client requested. (default) +# +# A size of 'none' causes Squid to always fetch the object from the +# beginning so it may cache the result. (2.0 style) +# +# 'aclname' is the name of a defined ACL. +# +# NP: Using 'none' as the byte value here will override any quick_abort settings +# that may otherwise apply to the range request. The range request will +# be fully fetched from start to finish regardless of the client +# actions. This affects bandwidth usage. +#Default: +# none + +# TAG: minimum_expiry_time (seconds) +# The minimum caching time according to (Expires - Date) +# headers Squid honors if the object can't be revalidated. +# The default is 60 seconds. +# +# In reverse proxy environments it might be desirable to honor +# shorter object lifetimes. It is most likely better to make +# your server return a meaningful Last-Modified header however. +# +# In ESI environments where page fragments often have short +# lifetimes, this will often be best set to 0. +#Default: +# minimum_expiry_time 60 seconds + +# TAG: store_avg_object_size (bytes) +# Average object size, used to estimate number of objects your +# cache can hold. The default is 13 KB. +# +# This is used to pre-seed the cache index memory allocation to +# reduce expensive reallocate operations while handling clients +# traffic. Too-large values may result in memory allocation during +# peak traffic, too-small values will result in wasted memory. +# +# Check the cache manager 'info' report metrics for the real +# object sizes seen by your Squid before tuning this. +#Default: +# store_avg_object_size 13 KB + +# TAG: store_objects_per_bucket +# Target number of objects per bucket in the store hash table. +# Lowering this value increases the total number of buckets and +# also the storage maintenance rate. The default is 20. +#Default: +# store_objects_per_bucket 20 + +# HTTP OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: request_header_max_size (KB) +# This specifies the maximum size for HTTP headers in a request. +# Request headers are usually relatively small (about 512 bytes). +# Placing a limit on the request header size will catch certain +# bugs (for example with persistent connections) and possibly +# buffer-overflow or denial-of-service attacks. +#Default: +# request_header_max_size 64 KB + +# TAG: reply_header_max_size (KB) +# This specifies the maximum size for HTTP headers in a reply. +# Reply headers are usually relatively small (about 512 bytes). +# Placing a limit on the reply header size will catch certain +# bugs (for example with persistent connections) and possibly +# buffer-overflow or denial-of-service attacks. +#Default: +# reply_header_max_size 64 KB + +# TAG: request_body_max_size (bytes) +# This specifies the maximum size for an HTTP request body. +# In other words, the maximum size of a PUT/POST request. +# A user who attempts to send a request with a body larger +# than this limit receives an "Invalid Request" error message. 
+# If you set this parameter to a zero (the default), there will +# be no limit imposed. +# +# See also client_request_buffer_max_size for an alternative +# limitation on client uploads which can be configured. +#Default: +# No limit. + +# TAG: client_request_buffer_max_size (bytes) +# This specifies the maximum buffer size of a client request. +# It prevents squid eating too much memory when somebody uploads +# a large file. +#Default: +# client_request_buffer_max_size 512 KB + +# TAG: broken_posts +# A list of ACL elements which, if matched, causes Squid to send +# an extra CRLF pair after the body of a PUT/POST request. +# +# Some HTTP servers has broken implementations of PUT/POST, +# and rely on an extra CRLF pair sent by some WWW clients. +# +# Quote from RFC2616 section 4.1 on this matter: +# +# Note: certain buggy HTTP/1.0 client implementations generate an +# extra CRLF's after a POST request. To restate what is explicitly +# forbidden by the BNF, an HTTP/1.1 client must not preface or follow +# a request with an extra CRLF. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +#Example: +# acl buggy_server url_regex ^http://.... +# broken_posts allow buggy_server +#Default: +# Obey RFC 2616. + +# TAG: adaptation_uses_indirect_client on|off +# Controls whether the indirect client IP address (instead of the direct +# client IP address) is passed to adaptation services. +# +# See also: follow_x_forwarded_for adaptation_send_client_ip +#Default: +# adaptation_uses_indirect_client on + +# TAG: via on|off +# If set (default), Squid will include a Via header in requests and +# replies as required by RFC2616. +#Default: +# via on + +# TAG: ie_refresh on|off +# Microsoft Internet Explorer up until version 5.5 Service +# Pack 1 has an issue with transparent proxies, wherein it +# is impossible to force a refresh. Turning this on provides +# a partial fix to the problem, by causing all IMS-REFRESH +# requests from older IE versions to check the origin server +# for fresh content. This reduces hit ratio by some amount +# (~10% in my experience), but allows users to actually get +# fresh content when they want it. Note because Squid +# cannot tell if the user is using 5.5 or 5.5SP1, the behavior +# of 5.5 is unchanged from old versions of Squid (i.e. a +# forced refresh is impossible). Newer versions of IE will, +# hopefully, continue to have the new behavior and will be +# handled based on that assumption. This option defaults to +# the old Squid behavior, which is better for hit ratios but +# worse for clients using IE, if they need to be able to +# force fresh content. +#Default: +# ie_refresh off + +# TAG: vary_ignore_expire on|off +# Many HTTP servers supporting Vary gives such objects +# immediate expiry time with no cache-control header +# when requested by a HTTP/1.0 client. This option +# enables Squid to ignore such expiry times until +# HTTP/1.1 is fully implemented. +# +# WARNING: If turned on this may eventually cause some +# varying objects not intended for caching to get cached. +#Default: +# vary_ignore_expire off + +# TAG: request_entities +# Squid defaults to deny GET and HEAD requests with request entities, +# as the meaning of such requests are undefined in the HTTP standard +# even if not explicitly forbidden. +# +# Set this directive to on if you have clients which insists +# on sending request entities in GET or HEAD requests. 
But be warned +# that there is server software (both proxies and web servers) which +# can fail to properly process this kind of request which may make you +# vulnerable to cache pollution attacks if enabled. +#Default: +# request_entities off + +# TAG: request_header_access +# Usage: request_header_access header_name allow|deny [!]aclname ... +# +# WARNING: Doing this VIOLATES the HTTP standard. Enabling +# this feature could make you liable for problems which it +# causes. +# +# This option replaces the old 'anonymize_headers' and the +# older 'http_anonymizer' option with something that is much +# more configurable. A list of ACLs for each header name allows +# removal of specific header fields under specific conditions. +# +# This option only applies to outgoing HTTP request headers (i.e., +# headers sent by Squid to the next HTTP hop such as a cache peer +# or an origin server). The option has no effect during cache hit +# detection. The equivalent adaptation vectoring point in ICAP +# terminology is post-cache REQMOD. +# +# The option is applied to individual outgoing request header +# fields. For each request header field F, Squid uses the first +# qualifying sets of request_header_access rules: +# +# 1. Rules with header_name equal to F's name. +# 2. Rules with header_name 'Other', provided F's name is not +# on the hard-coded list of commonly used HTTP header names. +# 3. Rules with header_name 'All'. +# +# Within that qualifying rule set, rule ACLs are checked as usual. +# If ACLs of an "allow" rule match, the header field is allowed to +# go through as is. If ACLs of a "deny" rule match, the header is +# removed and request_header_replace is then checked to identify +# if the removed header has a replacement. If no rules within the +# set have matching ACLs, the header field is left as is. +# +# For example, to achieve the same behavior as the old +# 'http_anonymizer standard' option, you should use: +# +# request_header_access From deny all +# request_header_access Referer deny all +# request_header_access User-Agent deny all +# +# Or, to reproduce the old 'http_anonymizer paranoid' feature +# you should use: +# +# request_header_access Authorization allow all +# request_header_access Proxy-Authorization allow all +# request_header_access Cache-Control allow all +# request_header_access Content-Length allow all +# request_header_access Content-Type allow all +# request_header_access Date allow all +# request_header_access Host allow all +# request_header_access If-Modified-Since allow all +# request_header_access Pragma allow all +# request_header_access Accept allow all +# request_header_access Accept-Charset allow all +# request_header_access Accept-Encoding allow all +# request_header_access Accept-Language allow all +# request_header_access Connection allow all +# request_header_access All deny all +# +# HTTP reply headers are controlled with the reply_header_access directive. +# +# By default, all headers are allowed (no anonymizing is performed). +#Default: +# No limits. + +# TAG: reply_header_access +# Usage: reply_header_access header_name allow|deny [!]aclname ... +# +# WARNING: Doing this VIOLATES the HTTP standard. Enabling +# this feature could make you liable for problems which it +# causes. +# +# This option only applies to reply headers, i.e., from the +# server to the client. +# +# This is the same as request_header_access, but in the other +# direction. Please see request_header_access for detailed +# documentation. 
+# +# For example, to achieve the same behavior as the old +# 'http_anonymizer standard' option, you should use: +# +# reply_header_access Server deny all +# reply_header_access WWW-Authenticate deny all +# reply_header_access Link deny all +# +# Or, to reproduce the old 'http_anonymizer paranoid' feature +# you should use: +# +# reply_header_access Allow allow all +# reply_header_access WWW-Authenticate allow all +# reply_header_access Proxy-Authenticate allow all +# reply_header_access Cache-Control allow all +# reply_header_access Content-Encoding allow all +# reply_header_access Content-Length allow all +# reply_header_access Content-Type allow all +# reply_header_access Date allow all +# reply_header_access Expires allow all +# reply_header_access Last-Modified allow all +# reply_header_access Location allow all +# reply_header_access Pragma allow all +# reply_header_access Content-Language allow all +# reply_header_access Retry-After allow all +# reply_header_access Title allow all +# reply_header_access Content-Disposition allow all +# reply_header_access Connection allow all +# reply_header_access All deny all +# +# HTTP request headers are controlled with the request_header_access directive. +# +# By default, all headers are allowed (no anonymizing is +# performed). +#Default: +# No limits. + +# TAG: request_header_replace +# Usage: request_header_replace header_name message +# Example: request_header_replace User-Agent Nutscrape/1.0 (CP/M; 8-bit) +# +# This option allows you to change the contents of headers +# denied with request_header_access above, by replacing them +# with some fixed string. +# +# This only applies to request headers, not reply headers. +# +# By default, headers are removed if denied. +#Default: +# none + +# TAG: reply_header_replace +# Usage: reply_header_replace header_name message +# Example: reply_header_replace Server Foo/1.0 +# +# This option allows you to change the contents of headers +# denied with reply_header_access above, by replacing them +# with some fixed string. +# +# This only applies to reply headers, not request headers. +# +# By default, headers are removed if denied. +#Default: +# none + +# TAG: request_header_add +# Usage: request_header_add field-name field-value acl1 [acl2] ... +# Example: request_header_add X-Client-CA "CA=%ssl::>cert_issuer" all +# +# This option adds header fields to outgoing HTTP requests (i.e., +# request headers sent by Squid to the next HTTP hop such as a +# cache peer or an origin server). The option has no effect during +# cache hit detection. The equivalent adaptation vectoring point +# in ICAP terminology is post-cache REQMOD. +# +# Field-name is a token specifying an HTTP header name. If a +# standard HTTP header name is used, Squid does not check whether +# the new header conflicts with any existing headers or violates +# HTTP rules. If the request to be modified already contains a +# field with the same name, the old field is preserved but the +# header field values are not merged. +# +# Field-value is either a token or a quoted string. If quoted +# string format is used, then the surrounding quotes are removed +# while escape sequences and %macros are processed. +# +# In theory, all of the logformat codes can be used as %macros. +# However, unlike logging (which happens at the very end of +# transaction lifetime), the transaction may not yet have enough +# information to expand a macro when the new header value is needed. 
+# And some information may already be available to Squid but not yet +# committed where the macro expansion code can access it (report +# such instances!). The macro will be expanded into a single dash +# ('-') in such cases. Not all macros have been tested. +# +# One or more Squid ACLs may be specified to restrict header +# injection to matching requests. As always in squid.conf, all +# ACLs in an option ACL list must be satisfied for the insertion +# to happen. The request_header_add option supports fast ACLs +# only. +#Default: +# none + +# TAG: note +# This option used to log custom information about the master +# transaction. For example, an admin may configure Squid to log +# which "user group" the transaction belongs to, where "user group" +# will be determined based on a set of ACLs and not [just] +# authentication information. +# Values of key/value pairs can be logged using %{key}note macros: +# +# note key value acl ... +# logformat myFormat ... %{key}note ... +#Default: +# none + +# TAG: relaxed_header_parser on|off|warn +# In the default "on" setting Squid accepts certain forms +# of non-compliant HTTP messages where it is unambiguous +# what the sending application intended even if the message +# is not correctly formatted. The messages is then normalized +# to the correct form when forwarded by Squid. +# +# If set to "warn" then a warning will be emitted in cache.log +# each time such HTTP error is encountered. +# +# If set to "off" then such HTTP errors will cause the request +# or response to be rejected. +#Default: +# relaxed_header_parser on + +# TAG: collapsed_forwarding (on|off) +# When enabled, instead of forwarding each concurrent request for +# the same URL, Squid just sends the first of them. The other, so +# called "collapsed" requests, wait for the response to the first +# request and, if it happens to be cachable, use that response. +# Here, "concurrent requests" means "received after the first +# request headers were parsed and before the corresponding response +# headers were parsed". +# +# This feature is disabled by default: enabling collapsed +# forwarding needlessly delays forwarding requests that look +# cachable (when they are collapsed) but then need to be forwarded +# individually anyway because they end up being for uncachable +# content. However, in some cases, such as acceleration of highly +# cachable content with periodic or grouped expiration times, the +# gains from collapsing [large volumes of simultaneous refresh +# requests] outweigh losses from such delays. +# +# Squid collapses two kinds of requests: regular client requests +# received on one of the listening ports and internal "cache +# revalidation" requests which are triggered by those regular +# requests hitting a stale cached object. Revalidation collapsing +# is currently disabled for Squid instances containing SMP-aware +# disk or memory caches and for Vary-controlled cached objects. +#Default: +# collapsed_forwarding off + +# TIMEOUTS +# ----------------------------------------------------------------------------- + +# TAG: forward_timeout time-units +# This parameter specifies how long Squid should at most attempt in +# finding a forwarding path for the request before giving up. +#Default: +# forward_timeout 4 minutes + +# TAG: connect_timeout time-units +# This parameter specifies how long to wait for the TCP connect to +# the requested server or peer to complete before Squid should +# attempt to find another path where to forward the request. 
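+#
+# For illustration only (the values are assumptions, not recommendations),
+# a forwarding setup that should give up on unreachable paths quickly
+# might shorten both of these timeouts:
+#
+# connect_timeout 30 seconds
+# forward_timeout 2 minutes
+#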
+#Default: +# connect_timeout 1 minute + +# TAG: peer_connect_timeout time-units +# This parameter specifies how long to wait for a pending TCP +# connection to a peer cache. The default is 30 seconds. You +# may also set different timeout values for individual neighbors +# with the 'connect-timeout' option on a 'cache_peer' line. +#Default: +# peer_connect_timeout 30 seconds + +# TAG: read_timeout time-units +# Applied on peer server connections. +# +# After each successful read(), the timeout will be extended by this +# amount. If no data is read again after this amount of time, +# the request is aborted and logged with ERR_READ_TIMEOUT. +# +# The default is 15 minutes. +#Default: +# read_timeout 15 minutes + +# TAG: write_timeout time-units +# This timeout is tracked for all connections that have data +# available for writing and are waiting for the socket to become +# ready. After each successful write, the timeout is extended by +# the configured amount. If Squid has data to write but the +# connection is not ready for the configured duration, the +# transaction associated with the connection is terminated. The +# default is 15 minutes. +#Default: +# write_timeout 15 minutes + +# TAG: request_timeout +# How long to wait for complete HTTP request headers after initial +# connection establishment. +#Default: +# request_timeout 5 minutes + +# TAG: client_idle_pconn_timeout +# How long to wait for the next HTTP request on a persistent +# client connection after the previous request completes. +#Default: +# client_idle_pconn_timeout 2 minutes + +# TAG: ftp_client_idle_timeout +# How long to wait for an FTP request on a connection to Squid ftp_port. +# Many FTP clients do not deal with idle connection closures well, +# necessitating a longer default timeout than client_idle_pconn_timeout +# used for incoming HTTP requests. +#Default: +# ftp_client_idle_timeout 30 minutes + +# TAG: client_lifetime time-units +# The maximum amount of time a client (browser) is allowed to +# remain connected to the cache process. This protects the Cache +# from having a lot of sockets (and hence file descriptors) tied up +# in a CLOSE_WAIT state from remote clients that go away without +# properly shutting down (either because of a network failure or +# because of a poor client implementation). The default is one +# day, 1440 minutes. +# +# NOTE: The default value is intended to be much larger than any +# client would ever need to be connected to your cache. You +# should probably change client_lifetime only as a last resort. +# If you seem to have many client connections tying up +# filedescriptors, we recommend first tuning the read_timeout, +# request_timeout, persistent_request_timeout and quick_abort values. +#Default: +# client_lifetime 1 day + +# TAG: half_closed_clients +# Some clients may shutdown the sending side of their TCP +# connections, while leaving their receiving sides open. Sometimes, +# Squid can not tell the difference between a half-closed and a +# fully-closed TCP connection. +# +# By default, Squid will immediately close client connections when +# read(2) returns "no more data to read." +# +# Change this option to 'on' and Squid will keep open connections +# until a read(2) or write(2) on the socket returns an error. +# This may show some benefits for reverse proxies. But if not +# it is recommended to leave OFF. +#Default: +# half_closed_clients off + +# TAG: server_idle_pconn_timeout +# Timeout for idle persistent connections to servers and other +# proxies. 
+#Default: +# server_idle_pconn_timeout 1 minute + +# TAG: ident_timeout +# Maximum time to wait for IDENT lookups to complete. +# +# If this is too high, and you enabled IDENT lookups from untrusted +# users, you might be susceptible to denial-of-service by having +# many ident requests going at once. +#Default: +# ident_timeout 10 seconds + +# TAG: shutdown_lifetime time-units +# When SIGTERM or SIGHUP is received, the cache is put into +# "shutdown pending" mode until all active sockets are closed. +# This value is the lifetime to set for all open descriptors +# during shutdown mode. Any active clients after this many +# seconds will receive a 'timeout' message. +#Default: +# shutdown_lifetime 30 seconds + +# ADMINISTRATIVE PARAMETERS +# ----------------------------------------------------------------------------- + +# TAG: cache_mgr +# Email-address of local cache manager who will receive +# mail if the cache dies. The default is "webmaster". +#Default: +# cache_mgr webmaster + +# TAG: mail_from +# From: email-address for mail sent when the cache dies. +# The default is to use 'squid@unique_hostname'. +# +# See also: unique_hostname directive. +#Default: +# none + +# TAG: mail_program +# Email program used to send mail if the cache dies. +# The default is "mail". The specified program must comply +# with the standard Unix mail syntax: +# mail-program recipient < mailfile +# +# Optional command line options can be specified. +#Default: +# mail_program mail + +# TAG: cache_effective_user +# If you start Squid as root, it will change its effective/real +# UID/GID to the user specified below. The default is to change +# to UID of proxy. +# see also; cache_effective_group +#Default: +# cache_effective_user proxy + +# TAG: cache_effective_group +# Squid sets the GID to the effective user's default group ID +# (taken from the password file) and supplementary group list +# from the groups membership. +# +# If you want Squid to run with a specific GID regardless of +# the group memberships of the effective user then set this +# to the group (or GID) you want Squid to run as. When set +# all other group privileges of the effective user are ignored +# and only this GID is effective. If Squid is not started as +# root the user starting Squid MUST be member of the specified +# group. +# +# This option is not recommended by the Squid Team. +# Our preference is for administrators to configure a secure +# user account for squid with UID/GID matching system policies. +#Default: +# Use system group memberships of the cache_effective_user account + +# TAG: httpd_suppress_version_string on|off +# Suppress Squid version string info in HTTP headers and HTML error pages. +#Default: +# httpd_suppress_version_string off + +# TAG: visible_hostname +# If you want to present a special hostname in error messages, etc, +# define this. Otherwise, the return value of gethostname() +# will be used. If you have multiple caches in a cluster and +# get errors about IP-forwarding you must set them to have individual +# names with this setting. +#Default: +# Automatically detect the system host name + +# TAG: unique_hostname +# If you want to have multiple machines with the same +# 'visible_hostname' you must give each machine a different +# 'unique_hostname' so forwarding loops can be detected. +#Default: +# Copy the value from visible_hostname + +# TAG: hostname_aliases +# A list of other DNS names your cache has. 
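+#
+# A sketch for two caches in a cluster that publish one name to users,
+# using the directives above (all host names are assumptions for
+# illustration):
+#
+# visible_hostname proxy.example.com
+# unique_hostname proxy1.example.com
+# hostname_aliases proxy1 proxy1.internal.example.com
+#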
+#Default: +# none + +# TAG: umask +# Minimum umask which should be enforced while the proxy +# is running, in addition to the umask set at startup. +# +# For a traditional octal representation of umasks, start +# your value with 0. +#Default: +# umask 027 + +# OPTIONS FOR THE CACHE REGISTRATION SERVICE +# ----------------------------------------------------------------------------- +# +# This section contains parameters for the (optional) cache +# announcement service. This service is provided to help +# cache administrators locate one another in order to join or +# create cache hierarchies. +# +# An 'announcement' message is sent (via UDP) to the registration +# service by Squid. By default, the announcement message is NOT +# SENT unless you enable it with 'announce_period' below. +# +# The announcement message includes your hostname, plus the +# following information from this configuration file: +# +# http_port +# icp_port +# cache_mgr +# +# All current information is processed regularly and made +# available on the Web at http://www.ircache.net/Cache/Tracker/. + +# TAG: announce_period +# This is how frequently to send cache announcements. +# +# To enable announcing your cache, just set an announce period. +# +# Example: +# announce_period 1 day +#Default: +# Announcement messages disabled. + +# TAG: announce_host +# Set the hostname where announce registration messages will be sent. +# +# See also announce_port and announce_file +#Default: +# announce_host tracker.ircache.net + +# TAG: announce_file +# The contents of this file will be included in the announce +# registration messages. +#Default: +# none + +# TAG: announce_port +# Set the port where announce registration messages will be sent. +# +# See also announce_host and announce_file +#Default: +# announce_port 3131 + +# HTTPD-ACCELERATOR OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: httpd_accel_surrogate_id +# Surrogates (http://www.esi.org/architecture_spec_1.0.html) +# need an identification token to allow control targeting. Because +# a farm of surrogates may all perform the same tasks, they may share +# an identification token. +#Default: +# visible_hostname is used if no specific ID is set. + +# TAG: http_accel_surrogate_remote on|off +# Remote surrogates (such as those in a CDN) honour the header +# "Surrogate-Control: no-store-remote". +# +# Set this to on to have squid behave as a remote surrogate. +#Default: +# http_accel_surrogate_remote off + +# TAG: esi_parser libxml2|expat|custom +# ESI markup is not strictly XML compatible. The custom ESI parser +# will give higher performance, but cannot handle non ASCII character +# encodings. +#Default: +# esi_parser custom + +# DELAY POOL PARAMETERS +# ----------------------------------------------------------------------------- + +# TAG: delay_pools +# This represents the number of delay pools to be used. For example, +# if you have one class 2 delay pool and one class 3 delays pool, you +# have a total of 2 delay pools. +# +# See also delay_parameters, delay_class, delay_access for pool +# configuration details. +#Default: +# delay_pools 0 + +# TAG: delay_class +# This defines the class of each delay pool. There must be exactly one +# delay_class line for each delay pool. 
For example, to define two +# delay pools, one of class 2 and one of class 3, the settings above +# and here would be: +# +# Example: +# delay_pools 4 # 4 delay pools +# delay_class 1 2 # pool 1 is a class 2 pool +# delay_class 2 3 # pool 2 is a class 3 pool +# delay_class 3 4 # pool 3 is a class 4 pool +# delay_class 4 5 # pool 4 is a class 5 pool +# +# The delay pool classes are: +# +# class 1 Everything is limited by a single aggregate +# bucket. +# +# class 2 Everything is limited by a single aggregate +# bucket as well as an "individual" bucket chosen +# from bits 25 through 32 of the IPv4 address. +# +# class 3 Everything is limited by a single aggregate +# bucket as well as a "network" bucket chosen +# from bits 17 through 24 of the IP address and a +# "individual" bucket chosen from bits 17 through +# 32 of the IPv4 address. +# +# class 4 Everything in a class 3 delay pool, with an +# additional limit on a per user basis. This +# only takes effect if the username is established +# in advance - by forcing authentication in your +# http_access rules. +# +# class 5 Requests are grouped according their tag (see +# external_acl's tag= reply). +# +# +# Each pool also requires a delay_parameters directive to configure the pool size +# and speed limits used whenever the pool is applied to a request. Along with +# a set of delay_access directives to determine when it is used. +# +# NOTE: If an IP address is a.b.c.d +# -> bits 25 through 32 are "d" +# -> bits 17 through 24 are "c" +# -> bits 17 through 32 are "c * 256 + d" +# +# NOTE-2: Due to the use of bitmasks in class 2,3,4 pools they only apply to +# IPv4 traffic. Class 1 and 5 pools may be used with IPv6 traffic. +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +# See also delay_parameters and delay_access. +#Default: +# none + +# TAG: delay_access +# This is used to determine which delay pool a request falls into. +# +# delay_access is sorted per pool and the matching starts with pool 1, +# then pool 2, ..., and finally pool N. The first delay pool where the +# request is allowed is selected for the request. If it does not allow +# the request to any pool then the request is not delayed (default). +# +# For example, if you want some_big_clients in delay +# pool 1 and lotsa_little_clients in delay pool 2: +# +# delay_access 1 allow some_big_clients +# delay_access 1 deny all +# delay_access 2 allow lotsa_little_clients +# delay_access 2 deny all +# delay_access 3 allow authenticated_clients +# +# See also delay_parameters and delay_class. +# +#Default: +# Deny using the pool, unless allow rules exist in squid.conf for the pool. + +# TAG: delay_parameters +# This defines the parameters for a delay pool. Each delay pool has +# a number of "buckets" associated with it, as explained in the +# description of delay_class. 
+# +# For a class 1 delay pool, the syntax is: +# delay_class pool 1 +# delay_parameters pool aggregate +# +# For a class 2 delay pool: +# delay_class pool 2 +# delay_parameters pool aggregate individual +# +# For a class 3 delay pool: +# delay_class pool 3 +# delay_parameters pool aggregate network individual +# +# For a class 4 delay pool: +# delay_class pool 4 +# delay_parameters pool aggregate network individual user +# +# For a class 5 delay pool: +# delay_class pool 5 +# delay_parameters pool tagrate +# +# The option variables are: +# +# pool a pool number - ie, a number between 1 and the +# number specified in delay_pools as used in +# delay_class lines. +# +# aggregate the speed limit parameters for the aggregate bucket +# (class 1, 2, 3). +# +# individual the speed limit parameters for the individual +# buckets (class 2, 3). +# +# network the speed limit parameters for the network buckets +# (class 3). +# +# user the speed limit parameters for the user buckets +# (class 4). +# +# tagrate the speed limit parameters for the tag buckets +# (class 5). +# +# A pair of delay parameters is written restore/maximum, where restore is +# the number of bytes (not bits - modem and network speeds are usually +# quoted in bits) per second placed into the bucket, and maximum is the +# maximum number of bytes which can be in the bucket at any time. +# +# There must be one delay_parameters line for each delay pool. +# +# +# For example, if delay pool number 1 is a class 2 delay pool as in the +# above example, and is being used to strictly limit each host to 64Kbit/sec +# (plus overheads), with no overall limit, the line is: +# +# delay_parameters 1 none 8000/8000 +# +# Note that 8 x 8K Byte/sec -> 64K bit/sec. +# +# Note that the word 'none' is used to represent no limit. +# +# +# And, if delay pool number 2 is a class 3 delay pool as in the above +# example, and you want to limit it to a total of 256Kbit/sec (strict limit) +# with each 8-bit network permitted 64Kbit/sec (strict limit) and each +# individual host permitted 4800bit/sec with a bucket maximum size of 64Kbits +# to permit a decent web page to be downloaded at a decent speed +# (if the network is not being limited due to overuse) but slow down +# large downloads more significantly: +# +# delay_parameters 2 32000/32000 8000/8000 600/8000 +# +# Note that 8 x 32K Byte/sec -> 256K bit/sec. +# 8 x 8K Byte/sec -> 64K bit/sec. +# 8 x 600 Byte/sec -> 4800 bit/sec. +# +# +# Finally, for a class 4 delay pool as in the example - each user will +# be limited to 128Kbits/sec no matter how many workstations they are logged into.: +# +# delay_parameters 4 32000/32000 8000/8000 600/64000 16000/16000 +# +# +# See also delay_class and delay_access. +# +#Default: +# none + +# TAG: delay_initial_bucket_level (percent, 0-100) +# The initial bucket percentage is used to determine how much is put +# in each bucket when squid starts, is reconfigured, or first notices +# a host accessing it (in class 2 and class 3, individual hosts and +# networks only have buckets associated with them once they have been +# "seen" by squid). +#Default: +# delay_initial_bucket_level 50 + +# CLIENT DELAY POOL PARAMETERS +# ----------------------------------------------------------------------------- + +# TAG: client_delay_pools +# This option specifies the number of client delay pools used. It must +# preceed other client_delay_* options. +# +# Example: +# client_delay_pools 2 +# +# See also client_delay_parameters and client_delay_access. 
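+#
+# A fuller sketch combining the client_delay_* directives (the ACL name,
+# subnet and byte rates below are assumptions for illustration):
+#
+# acl metered_clients src 10.0.1.0/24
+# client_delay_pools 1
+# client_delay_parameters 1 64000 128000
+# client_delay_access 1 allow metered_clients
+# client_delay_access 1 deny all
+#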
+#Default: +# client_delay_pools 0 + +# TAG: client_delay_initial_bucket_level (percent, 0-no_limit) +# This option determines the initial bucket size as a percentage of +# max_bucket_size from client_delay_parameters. Buckets are created +# at the time of the "first" connection from the matching IP. Idle +# buckets are periodically deleted up. +# +# You can specify more than 100 percent but note that such "oversized" +# buckets are not refilled until their size goes down to max_bucket_size +# from client_delay_parameters. +# +# Example: +# client_delay_initial_bucket_level 50 +#Default: +# client_delay_initial_bucket_level 50 + +# TAG: client_delay_parameters +# +# This option configures client-side bandwidth limits using the +# following format: +# +# client_delay_parameters pool speed_limit max_bucket_size +# +# pool is an integer ID used for client_delay_access matching. +# +# speed_limit is bytes added to the bucket per second. +# +# max_bucket_size is the maximum size of a bucket, enforced after any +# speed_limit additions. +# +# Please see the delay_parameters option for more information and +# examples. +# +# Example: +# client_delay_parameters 1 1024 2048 +# client_delay_parameters 2 51200 16384 +# +# See also client_delay_access. +# +#Default: +# none + +# TAG: client_delay_access +# This option determines the client-side delay pool for the +# request: +# +# client_delay_access pool_ID allow|deny acl_name +# +# All client_delay_access options are checked in their pool ID +# order, starting with pool 1. The first checked pool with allowed +# request is selected for the request. If no ACL matches or there +# are no client_delay_access options, the request bandwidth is not +# limited. +# +# The ACL-selected pool is then used to find the +# client_delay_parameters for the request. Client-side pools are +# not used to aggregate clients. Clients are always aggregated +# based on their source IP addresses (one bucket per source IP). +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# Additionally, only the client TCP connection details are available. +# ACLs testing HTTP properties will not work. +# +# Please see delay_access for more examples. +# +# Example: +# client_delay_access 1 allow low_rate_network +# client_delay_access 2 allow vips_network +# +# +# See also client_delay_parameters and client_delay_pools. +#Default: +# Deny use of the pool, unless allow rules exist in squid.conf for the pool. + +# WCCPv1 AND WCCPv2 CONFIGURATION OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: wccp_router +# Use this option to define your WCCP ``home'' router for +# Squid. +# +# wccp_router supports a single WCCP(v1) router +# +# wccp2_router supports multiple WCCPv2 routers +# +# only one of the two may be used at the same time and defines +# which version of WCCP to use. +#Default: +# WCCP disabled. + +# TAG: wccp2_router +# Use this option to define your WCCP ``home'' router for +# Squid. +# +# wccp_router supports a single WCCP(v1) router +# +# wccp2_router supports multiple WCCPv2 routers +# +# only one of the two may be used at the same time and defines +# which version of WCCP to use. +#Default: +# WCCPv2 disabled. + +# TAG: wccp_version +# This directive is only relevant if you need to set up WCCP(v1) +# to some very old and end-of-life Cisco routers. In all other +# setups it must be left unset or at the default setting. 
+# It defines an internal version in the WCCP(v1) protocol, +# with version 4 being the officially documented protocol. +# +# According to some users, Cisco IOS 11.2 and earlier only +# support WCCP version 3. If you're using that or an earlier +# version of IOS, you may need to change this value to 3, otherwise +# do not specify this parameter. +#Default: +# wccp_version 4 + +# TAG: wccp2_rebuild_wait +# If this is enabled Squid will wait for the cache dir rebuild to finish +# before sending the first wccp2 HereIAm packet +#Default: +# wccp2_rebuild_wait on + +# TAG: wccp2_forwarding_method +# WCCP2 allows the setting of forwarding methods between the +# router/switch and the cache. Valid values are as follows: +# +# gre - GRE encapsulation (forward the packet in a GRE/WCCP tunnel) +# l2 - L2 redirect (forward the packet using Layer 2/MAC rewriting) +# +# Currently (as of IOS 12.4) cisco routers only support GRE. +# Cisco switches only support the L2 redirect assignment method. +#Default: +# wccp2_forwarding_method gre + +# TAG: wccp2_return_method +# WCCP2 allows the setting of return methods between the +# router/switch and the cache for packets that the cache +# decides not to handle. Valid values are as follows: +# +# gre - GRE encapsulation (forward the packet in a GRE/WCCP tunnel) +# l2 - L2 redirect (forward the packet using Layer 2/MAC rewriting) +# +# Currently (as of IOS 12.4) cisco routers only support GRE. +# Cisco switches only support the L2 redirect assignment. +# +# If the "ip wccp redirect exclude in" command has been +# enabled on the cache interface, then it is still safe for +# the proxy server to use a l2 redirect method even if this +# option is set to GRE. +#Default: +# wccp2_return_method gre + +# TAG: wccp2_assignment_method +# WCCP2 allows the setting of methods to assign the WCCP hash +# Valid values are as follows: +# +# hash - Hash assignment +# mask - Mask assignment +# +# As a general rule, cisco routers support the hash assignment method +# and cisco switches support the mask assignment method. +#Default: +# wccp2_assignment_method hash + +# TAG: wccp2_service +# WCCP2 allows for multiple traffic services. There are two +# types: "standard" and "dynamic". The standard type defines +# one service id - http (id 0). The dynamic service ids can be from +# 51 to 255 inclusive. In order to use a dynamic service id +# one must define the type of traffic to be redirected; this is done +# using the wccp2_service_info option. +# +# The "standard" type does not require a wccp2_service_info option, +# just specifying the service id will suffice. +# +# MD5 service authentication can be enabled by adding +# "password=" to the end of this service declaration. +# +# Examples: +# +# wccp2_service standard 0 # for the 'web-cache' standard service +# wccp2_service dynamic 80 # a dynamic service type which will be +# # fleshed out with subsequent options. +# wccp2_service standard 0 password=foo +#Default: +# Use the 'web-cache' standard service. + +# TAG: wccp2_service_info +# Dynamic WCCPv2 services require further information to define the +# traffic you wish to have diverted. +# +# The format is: +# +# wccp2_service_info protocol= flags=,.. +# priority= ports=,.. +# +# The relevant WCCPv2 flags: +# + src_ip_hash, dst_ip_hash +# + source_port_hash, dst_port_hash +# + src_ip_alt_hash, dst_ip_alt_hash +# + src_port_alt_hash, dst_port_alt_hash +# + ports_source +# +# The port list can be one to eight entries. 
+# +# Example: +# +# wccp2_service_info 80 protocol=tcp flags=src_ip_hash,ports_source +# priority=240 ports=80 +# +# Note: the service id must have been defined by a previous +# 'wccp2_service dynamic ' entry. +#Default: +# none + +# TAG: wccp2_weight +# Each cache server gets assigned a set of the destination +# hash proportional to their weight. +#Default: +# wccp2_weight 10000 + +# TAG: wccp_address +# Use this option if you require WCCPv2 to use a specific +# interface address. +# +# The default behavior is to not bind to any specific address. +#Default: +# Address selected by the operating system. + +# TAG: wccp2_address +# Use this option if you require WCCP to use a specific +# interface address. +# +# The default behavior is to not bind to any specific address. +#Default: +# Address selected by the operating system. + +# PERSISTENT CONNECTION HANDLING +# ----------------------------------------------------------------------------- +# +# Also see "pconn_timeout" in the TIMEOUTS section + +# TAG: client_persistent_connections +# Persistent connection support for clients. +# Squid uses persistent connections (when allowed). You can use +# this option to disable persistent connections with clients. +#Default: +# client_persistent_connections on + +# TAG: server_persistent_connections +# Persistent connection support for servers. +# Squid uses persistent connections (when allowed). You can use +# this option to disable persistent connections with servers. +#Default: +# server_persistent_connections on + +# TAG: persistent_connection_after_error +# With this directive the use of persistent connections after +# HTTP errors can be disabled. Useful if you have clients +# who fail to handle errors on persistent connections proper. +#Default: +# persistent_connection_after_error on + +# TAG: detect_broken_pconn +# Some servers have been found to incorrectly signal the use +# of HTTP/1.0 persistent connections even on replies not +# compatible, causing significant delays. This server problem +# has mostly been seen on redirects. +# +# By enabling this directive Squid attempts to detect such +# broken replies and automatically assume the reply is finished +# after 10 seconds timeout. +#Default: +# detect_broken_pconn off + +# CACHE DIGEST OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: digest_generation +# This controls whether the server will generate a Cache Digest +# of its contents. By default, Cache Digest generation is +# enabled if Squid is compiled with --enable-cache-digests defined. +#Default: +# digest_generation on + +# TAG: digest_bits_per_entry +# This is the number of bits of the server's Cache Digest which +# will be associated with the Digest entry for a given HTTP +# Method and URL (public key) combination. The default is 5. +#Default: +# digest_bits_per_entry 5 + +# TAG: digest_rebuild_period (seconds) +# This is the wait time between Cache Digest rebuilds. +#Default: +# digest_rebuild_period 1 hour + +# TAG: digest_rewrite_period (seconds) +# This is the wait time between Cache Digest writes to +# disk. +#Default: +# digest_rewrite_period 1 hour + +# TAG: digest_swapout_chunk_size (bytes) +# This is the number of bytes of the Cache Digest to write to +# disk at a time. It defaults to 4096 bytes (4KB), the Squid +# default swap page. +#Default: +# digest_swapout_chunk_size 4096 bytes + +# TAG: digest_rebuild_chunk_percentage (percent, 0-100) +# This is the percentage of the Cache Digest to be scanned at a +# time. 
By default it is set to 10% of the Cache Digest. +#Default: +# digest_rebuild_chunk_percentage 10 + +# SNMP OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: snmp_port +# The port number where Squid listens for SNMP requests. To enable +# SNMP support set this to a suitable port number. Port number +# 3401 is often used for the Squid SNMP agent. By default it's +# set to "0" (disabled) +# +# Example: +# snmp_port 3401 +#Default: +# SNMP disabled. + +# TAG: snmp_access +# Allowing or denying access to the SNMP port. +# +# All access to the agent is denied by default. +# usage: +# +# snmp_access allow|deny [!]aclname ... +# +# This clause only supports fast acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +# +#Example: +# snmp_access allow snmppublic localhost +# snmp_access deny all +#Default: +# Deny, unless rules exist in squid.conf. + +# TAG: snmp_incoming_address +# Just like 'udp_incoming_address', but for the SNMP port. +# +# snmp_incoming_address is used for the SNMP socket receiving +# messages from SNMP agents. +# +# The default snmp_incoming_address is to listen on all +# available network interfaces. +#Default: +# Accept SNMP packets from all machine interfaces. + +# TAG: snmp_outgoing_address +# Just like 'udp_outgoing_address', but for the SNMP port. +# +# snmp_outgoing_address is used for SNMP packets returned to SNMP +# agents. +# +# If snmp_outgoing_address is not set it will use the same socket +# as snmp_incoming_address. Only change this if you want to have +# SNMP replies sent using another address than where this Squid +# listens for SNMP queries. +# +# NOTE, snmp_incoming_address and snmp_outgoing_address can not have +# the same value since they both use the same port. +#Default: +# Use snmp_incoming_address or an address selected by the operating system. + +# ICP OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: icp_port +# The port number where Squid sends and receives ICP queries to +# and from neighbor caches. The standard UDP port for ICP is 3130. +# +# Example: +# icp_port 3130 +#Default: +# ICP disabled. + +# TAG: htcp_port +# The port number where Squid sends and receives HTCP queries to +# and from neighbor caches. To turn it on you want to set it to +# 4827. +# +# Example: +# htcp_port 4827 +#Default: +# HTCP disabled. + +# TAG: log_icp_queries on|off +# If set, ICP queries are logged to access.log. You may wish +# do disable this if your ICP load is VERY high to speed things +# up or to simplify log analysis. +#Default: +# log_icp_queries on + +# TAG: udp_incoming_address +# udp_incoming_address is used for UDP packets received from other +# caches. +# +# The default behavior is to not bind to any specific address. +# +# Only change this if you want to have all UDP queries received on +# a specific interface/address. +# +# NOTE: udp_incoming_address is used by the ICP, HTCP, and DNS +# modules. Altering it will affect all of them in the same manner. +# +# see also; udp_outgoing_address +# +# NOTE, udp_incoming_address and udp_outgoing_address can not +# have the same value since they both use the same port. +#Default: +# Accept packets from all machine interfaces. + +# TAG: udp_outgoing_address +# udp_outgoing_address is used for UDP packets sent out to other +# caches. +# +# The default behavior is to not bind to any specific address. +# +# Instead it will use the same socket as udp_incoming_address. 
+# Only change this if you want to have UDP queries sent using another +# address than where this Squid listens for UDP queries from other +# caches. +# +# NOTE: udp_outgoing_address is used by the ICP, HTCP, and DNS +# modules. Altering it will affect all of them in the same manner. +# +# see also; udp_incoming_address +# +# NOTE, udp_incoming_address and udp_outgoing_address can not +# have the same value since they both use the same port. +#Default: +# Use udp_incoming_address or an address selected by the operating system. + +# TAG: icp_hit_stale on|off +# If you want to return ICP_HIT for stale cache objects, set this +# option to 'on'. If you have sibling relationships with caches +# in other administrative domains, this should be 'off'. If you only +# have sibling relationships with caches under your control, +# it is probably okay to set this to 'on'. +# If set to 'on', your siblings should use the option "allow-miss" +# on their cache_peer lines for connecting to you. +#Default: +# icp_hit_stale off + +# TAG: minimum_direct_hops +# If using the ICMP pinging stuff, do direct fetches for sites +# which are no more than this many hops away. +#Default: +# minimum_direct_hops 4 + +# TAG: minimum_direct_rtt (msec) +# If using the ICMP pinging stuff, do direct fetches for sites +# which are no more than this many rtt milliseconds away. +#Default: +# minimum_direct_rtt 400 + +# TAG: netdb_low +# The low water mark for the ICMP measurement database. +# +# Note: high watermark controlled by netdb_high directive. +# +# These watermarks are counts, not percents. The defaults are +# (low) 900 and (high) 1000. When the high water mark is +# reached, database entries will be deleted until the low +# mark is reached. +#Default: +# netdb_low 900 + +# TAG: netdb_high +# The high water mark for the ICMP measurement database. +# +# Note: low watermark controlled by netdb_low directive. +# +# These watermarks are counts, not percents. The defaults are +# (low) 900 and (high) 1000. When the high water mark is +# reached, database entries will be deleted until the low +# mark is reached. +#Default: +# netdb_high 1000 + +# TAG: netdb_ping_period +# The minimum period for measuring a site. There will be at +# least this much delay between successive pings to the same +# network. The default is five minutes. +#Default: +# netdb_ping_period 5 minutes + +# TAG: query_icmp on|off +# If you want to ask your peers to include ICMP data in their ICP +# replies, enable this option. +# +# If your peer has configured Squid (during compilation) with +# '--enable-icmp' that peer will send ICMP pings to origin server +# sites of the URLs it receives. If you enable this option the +# ICP replies from that peer will include the ICMP data (if available). +# Then, when choosing a parent cache, Squid will choose the parent with +# the minimal RTT to the origin server. When this happens, the +# hierarchy field of the access.log will be +# "CLOSEST_PARENT_MISS". This option is off by default. +#Default: +# query_icmp off + +# TAG: test_reachability on|off +# When this is 'on', ICP MISS replies will be ICP_MISS_NOFETCH +# instead of ICP_MISS if the target host is NOT in the ICMP +# database, or has a zero RTT. +#Default: +# test_reachability off + +# TAG: icp_query_timeout (msec) +# Normally Squid will automatically determine an optimal ICP +# query timeout value based on the round-trip-time of recent ICP +# queries. If you want to override the value determined by +# Squid, set this 'icp_query_timeout' to a non-zero value. 
This +# value is specified in MILLISECONDS, so, to use a 2-second +# timeout (the old default), you would write: +# +# icp_query_timeout 2000 +#Default: +# Dynamic detection. + +# TAG: maximum_icp_query_timeout (msec) +# Normally the ICP query timeout is determined dynamically. But +# sometimes it can lead to very large values (say 5 seconds). +# Use this option to put an upper limit on the dynamic timeout +# value. Do NOT use this option to always use a fixed (instead +# of a dynamic) timeout value. To set a fixed timeout see the +# 'icp_query_timeout' directive. +#Default: +# maximum_icp_query_timeout 2000 + +# TAG: minimum_icp_query_timeout (msec) +# Normally the ICP query timeout is determined dynamically. But +# sometimes it can lead to very small timeouts, even lower than +# the normal latency variance on your link due to traffic. +# Use this option to put an lower limit on the dynamic timeout +# value. Do NOT use this option to always use a fixed (instead +# of a dynamic) timeout value. To set a fixed timeout see the +# 'icp_query_timeout' directive. +#Default: +# minimum_icp_query_timeout 5 + +# TAG: background_ping_rate time-units +# Controls how often the ICP pings are sent to siblings that +# have background-ping set. +#Default: +# background_ping_rate 10 seconds + +# MULTICAST ICP OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: mcast_groups +# This tag specifies a list of multicast groups which your server +# should join to receive multicasted ICP queries. +# +# NOTE! Be very careful what you put here! Be sure you +# understand the difference between an ICP _query_ and an ICP +# _reply_. This option is to be set only if you want to RECEIVE +# multicast queries. Do NOT set this option to SEND multicast +# ICP (use cache_peer for that). ICP replies are always sent via +# unicast, so this option does not affect whether or not you will +# receive replies from multicast group members. +# +# You must be very careful to NOT use a multicast address which +# is already in use by another group of caches. +# +# If you are unsure about multicast, please read the Multicast +# chapter in the Squid FAQ (http://www.squid-cache.org/FAQ/). +# +# Usage: mcast_groups 239.128.16.128 224.0.1.20 +# +# By default, Squid doesn't listen on any multicast groups. +#Default: +# none + +# TAG: mcast_miss_addr +# Note: This option is only available if Squid is rebuilt with the +# -DMULTICAST_MISS_STREAM define +# +# If you enable this option, every "cache miss" URL will +# be sent out on the specified multicast address. +# +# Do not enable this option unless you are are absolutely +# certain you understand what you are doing. +#Default: +# disabled. + +# TAG: mcast_miss_ttl +# Note: This option is only available if Squid is rebuilt with the +# -DMULTICAST_MISS_STREAM define +# +# This is the time-to-live value for packets multicasted +# when multicasting off cache miss URLs is enabled. By +# default this is set to 'site scope', i.e. 16. +#Default: +# mcast_miss_ttl 16 + +# TAG: mcast_miss_port +# Note: This option is only available if Squid is rebuilt with the +# -DMULTICAST_MISS_STREAM define +# +# This is the port number to be used in conjunction with +# 'mcast_miss_addr'. +#Default: +# mcast_miss_port 3135 + +# TAG: mcast_miss_encode_key +# Note: This option is only available if Squid is rebuilt with the +# -DMULTICAST_MISS_STREAM define +# +# The URLs that are sent in the multicast miss stream are +# encrypted. This is the encryption key. 
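+#
+#	Illustrative only (the key value below is an assumption; use your
+#	own shared secret, matching the configuration of the receiver):
+#
+#	mcast_miss_encode_key 0123456789abcdef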
+#Default: +# mcast_miss_encode_key XXXXXXXXXXXXXXXX + +# TAG: mcast_icp_query_timeout (msec) +# For multicast peers, Squid regularly sends out ICP "probes" to +# count how many other peers are listening on the given multicast +# address. This value specifies how long Squid should wait to +# count all the replies. The default is 2000 msec, or 2 +# seconds. +#Default: +# mcast_icp_query_timeout 2000 + +# INTERNAL ICON OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: icon_directory +# Where the icons are stored. These are normally kept in +# /usr/share/squid/icons +#Default: +# icon_directory /usr/share/squid/icons + +# TAG: global_internal_static +# This directive controls is Squid should intercept all requests for +# /squid-internal-static/ no matter which host the URL is requesting +# (default on setting), or if nothing special should be done for +# such URLs (off setting). The purpose of this directive is to make +# icons etc work better in complex cache hierarchies where it may +# not always be possible for all corners in the cache mesh to reach +# the server generating a directory listing. +#Default: +# global_internal_static on + +# TAG: short_icon_urls +# If this is enabled Squid will use short URLs for icons. +# If disabled it will revert to the old behavior of including +# it's own name and port in the URL. +# +# If you run a complex cache hierarchy with a mix of Squid and +# other proxies you may need to disable this directive. +#Default: +# short_icon_urls on + +# ERROR PAGE OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: error_directory +# If you wish to create your own versions of the default +# error files to customize them to suit your company copy +# the error/template files to another directory and point +# this tag at them. +# +# WARNING: This option will disable multi-language support +# on error pages if used. +# +# The squid developers are interested in making squid available in +# a wide variety of languages. If you are making translations for a +# language that Squid does not currently provide please consider +# contributing your translation back to the project. +# http://wiki.squid-cache.org/Translations +# +# The squid developers working on translations are happy to supply drop-in +# translated error files in exchange for any new language contributions. +#Default: +# Send error pages in the clients preferred language + +# TAG: error_default_language +# Set the default language which squid will send error pages in +# if no existing translation matches the clients language +# preferences. +# +# If unset (default) generic English will be used. +# +# The squid developers are interested in making squid available in +# a wide variety of languages. If you are interested in making +# translations for any language see the squid wiki for details. +# http://wiki.squid-cache.org/Translations +#Default: +# Generate English language pages. + +# TAG: error_log_languages +# Log to cache.log what languages users are attempting to +# auto-negotiate for translations. +# +# Successful negotiations are not logged. Only failures +# have meaning to indicate that Squid may need an upgrade +# of its error page translations. +#Default: +# error_log_languages on + +# TAG: err_page_stylesheet +# CSS Stylesheet to pattern the display of Squid default error pages. 
+# +# For information on CSS see http://www.w3.org/Style/CSS/ +#Default: +# err_page_stylesheet /etc/squid/errorpage.css + +# TAG: err_html_text +# HTML text to include in error messages. Make this a "mailto" +# URL to your admin address, or maybe just a link to your +# organizations Web page. +# +# To include this in your error messages, you must rewrite +# the error template files (found in the "errors" directory). +# Wherever you want the 'err_html_text' line to appear, +# insert a %L tag in the error template file. +#Default: +# none + +# TAG: email_err_data on|off +# If enabled, information about the occurred error will be +# included in the mailto links of the ERR pages (if %W is set) +# so that the email body contains the data. +# Syntax is %w +#Default: +# email_err_data on + +# TAG: deny_info +# Usage: deny_info err_page_name acl +# or deny_info http://... acl +# or deny_info TCP_RESET acl +# +# This can be used to return a ERR_ page for requests which +# do not pass the 'http_access' rules. Squid remembers the last +# acl it evaluated in http_access, and if a 'deny_info' line exists +# for that ACL Squid returns a corresponding error page. +# +# The acl is typically the last acl on the http_access deny line which +# denied access. The exceptions to this rule are: +# - When Squid needs to request authentication credentials. It's then +# the first authentication related acl encountered +# - When none of the http_access lines matches. It's then the last +# acl processed on the last http_access line. +# - When the decision to deny access was made by an adaptation service, +# the acl name is the corresponding eCAP or ICAP service_name. +# +# NP: If providing your own custom error pages with error_directory +# you may also specify them by your custom file name: +# Example: deny_info ERR_CUSTOM_ACCESS_DENIED bad_guys +# +# By defaut Squid will send "403 Forbidden". A different 4xx or 5xx +# may be specified by prefixing the file name with the code and a colon. +# e.g. 404:ERR_CUSTOM_ACCESS_DENIED +# +# Alternatively you can tell Squid to reset the TCP connection +# by specifying TCP_RESET. +# +# Or you can specify an error URL or URL pattern. The browsers will +# get redirected to the specified URL after formatting tags have +# been replaced. Redirect will be done with 302 or 307 according to +# HTTP/1.1 specs. A different 3xx code may be specified by prefixing +# the URL. e.g. 303:http://example.com/ +# +# URL FORMAT TAGS: +# %a - username (if available. Password NOT included) +# %B - FTP path URL +# %e - Error number +# %E - Error description +# %h - Squid hostname +# %H - Request domain name +# %i - Client IP Address +# %M - Request Method +# %o - Message result from external ACL helper +# %p - Request Port number +# %P - Request Protocol name +# %R - Request URL path +# %T - Timestamp in RFC 1123 format +# %U - Full canonical URL from client +# (HTTPS URLs terminate with *) +# %u - Full canonical URL from client +# %w - Admin email from squid.conf +# %x - Error name +# %% - Literal percent (%) code +# +#Default: +# none + +# OPTIONS INFLUENCING REQUEST FORWARDING +# ----------------------------------------------------------------------------- + +# TAG: nonhierarchical_direct +# By default, Squid will send any non-hierarchical requests +# (not cacheable request type) direct to origin servers. +# +# When this is set to "off", Squid will prefer to send these +# requests to parents. 
+# +# Note that in most configurations, by turning this off you will only +# add latency to these request without any improvement in global hit +# ratio. +# +# This option only sets a preference. If the parent is unavailable a +# direct connection to the origin server may still be attempted. To +# completely prevent direct connections use never_direct. +#Default: +# nonhierarchical_direct on + +# TAG: prefer_direct +# Normally Squid tries to use parents for most requests. If you for some +# reason like it to first try going direct and only use a parent if +# going direct fails set this to on. +# +# By combining nonhierarchical_direct off and prefer_direct on you +# can set up Squid to use a parent as a backup path if going direct +# fails. +# +# Note: If you want Squid to use parents for all requests see +# the never_direct directive. prefer_direct only modifies how Squid +# acts on cacheable requests. +#Default: +# prefer_direct off + +# TAG: cache_miss_revalidate on|off +# RFC 7232 defines a conditional request mechanism to prevent +# response objects being unnecessarily transferred over the network. +# If that mechanism is used by the client and a cache MISS occurs +# it can prevent new cache entries being created. +# +# This option determines whether Squid on cache MISS will pass the +# client revalidation request to the server or tries to fetch new +# content for caching. It can be useful while the cache is mostly +# empty to more quickly have the cache populated by generating +# non-conditional GETs. +# +# When set to 'on' (default), Squid will pass all client If-* headers +# to the server. This permits server responses without a cacheable +# payload to be delivered and on MISS no new cache entry is created. +# +# When set to 'off' and if the request is cacheable, Squid will +# remove the clients If-Modified-Since and If-None-Match headers from +# the request sent to the server. This requests a 200 status response +# from the server to create a new cache entry with. +#Default: +# cache_miss_revalidate on + +# TAG: always_direct +# Usage: always_direct allow|deny [!]aclname ... +# +# Here you can use ACL elements to specify requests which should +# ALWAYS be forwarded by Squid to the origin servers without using +# any peers. For example, to always directly forward requests for +# local servers ignoring any parents or siblings you may have use +# something like: +# +# acl local-servers dstdomain my.domain.net +# always_direct allow local-servers +# +# To always forward FTP requests directly, use +# +# acl FTP proto FTP +# always_direct allow FTP +# +# NOTE: There is a similar, but opposite option named +# 'never_direct'. You need to be aware that "always_direct deny +# foo" is NOT the same thing as "never_direct allow foo". You +# may need to use a deny rule to exclude a more-specific case of +# some other rule. Example: +# +# acl local-external dstdomain external.foo.net +# acl local-servers dstdomain .foo.net +# always_direct deny local-external +# always_direct allow local-servers +# +# NOTE: If your goal is to make the client forward the request +# directly to the origin server bypassing Squid then this needs +# to be done in the client configuration. Squid configuration +# can only tell Squid how Squid should fetch the object. +# +# NOTE: This directive is not related to caching. The replies +# is cached as usual even if you use always_direct. To not cache +# the replies see the 'cache' directive. +# +# This clause supports both fast and slow acl types. 
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# Prevent any cache_peer being used for this request. + +# TAG: never_direct +# Usage: never_direct allow|deny [!]aclname ... +# +# never_direct is the opposite of always_direct. Please read +# the description for always_direct if you have not already. +# +# With 'never_direct' you can use ACL elements to specify +# requests which should NEVER be forwarded directly to origin +# servers. For example, to force the use of a proxy for all +# requests, except those in your local domain use something like: +# +# acl local-servers dstdomain .foo.net +# never_direct deny local-servers +# never_direct allow all +# +# or if Squid is inside a firewall and there are local intranet +# servers inside the firewall use something like: +# +# acl local-intranet dstdomain .foo.net +# acl local-external dstdomain external.foo.net +# always_direct deny local-external +# always_direct allow local-intranet +# never_direct allow all +# +# This clause supports both fast and slow acl types. +# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details. +#Default: +# Allow DNS results to be used for this request. + +# ADVANCED NETWORKING OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: incoming_udp_average +# Heavy voodoo here. I can't even believe you are reading this. +# Are you crazy? Don't even think about adjusting these unless +# you understand the algorithms in comm_select.c first! +#Default: +# incoming_udp_average 6 + +# TAG: incoming_tcp_average +# Heavy voodoo here. I can't even believe you are reading this. +# Are you crazy? Don't even think about adjusting these unless +# you understand the algorithms in comm_select.c first! +#Default: +# incoming_tcp_average 4 + +# TAG: incoming_dns_average +# Heavy voodoo here. I can't even believe you are reading this. +# Are you crazy? Don't even think about adjusting these unless +# you understand the algorithms in comm_select.c first! +#Default: +# incoming_dns_average 4 + +# TAG: min_udp_poll_cnt +# Heavy voodoo here. I can't even believe you are reading this. +# Are you crazy? Don't even think about adjusting these unless +# you understand the algorithms in comm_select.c first! +#Default: +# min_udp_poll_cnt 8 + +# TAG: min_dns_poll_cnt +# Heavy voodoo here. I can't even believe you are reading this. +# Are you crazy? Don't even think about adjusting these unless +# you understand the algorithms in comm_select.c first! +#Default: +# min_dns_poll_cnt 8 + +# TAG: min_tcp_poll_cnt +# Heavy voodoo here. I can't even believe you are reading this. +# Are you crazy? Don't even think about adjusting these unless +# you understand the algorithms in comm_select.c first! +#Default: +# min_tcp_poll_cnt 8 + +# TAG: accept_filter +# FreeBSD: +# +# The name of an accept(2) filter to install on Squid's +# listen socket(s). This feature is perhaps specific to +# FreeBSD and requires support in the kernel. +# +# The 'httpready' filter delays delivering new connections +# to Squid until a full HTTP request has been received. +# See the accf_http(9) man page for details. +# +# The 'dataready' filter delays delivering new connections +# to Squid until there is some data to process. +# See the accf_dataready(9) man page for details. +# +# Linux: +# +# The 'data' filter delays delivering of new connections +# to Squid until there is some data to process by TCP_ACCEPT_DEFER. 
+# You may optionally specify a number of seconds to wait by +# 'data=N' where N is the number of seconds. Defaults to 30 +# if not specified. See the tcp(7) man page for details. +#EXAMPLE: +## FreeBSD +#accept_filter httpready +## Linux +#accept_filter data +#Default: +# none + +# TAG: client_ip_max_connections +# Set an absolute limit on the number of connections a single +# client IP can use. Any more than this and Squid will begin to drop +# new connections from the client until it closes some links. +# +# Note that this is a global limit. It affects all HTTP, HTCP, Gopher and FTP +# connections from the client. For finer control use the ACL access controls. +# +# Requires client_db to be enabled (the default). +# +# WARNING: This may noticably slow down traffic received via external proxies +# or NAT devices and cause them to rebound error messages back to their clients. +#Default: +# No limit. + +# TAG: tcp_recv_bufsize (bytes) +# Size of receive buffer to set for TCP sockets. Probably just +# as easy to change your kernel's default. +# Omit from squid.conf to use the default buffer size. +#Default: +# Use operating system TCP defaults. + +# ICAP OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: icap_enable on|off +# If you want to enable the ICAP module support, set this to on. +#Default: +# icap_enable off + +# TAG: icap_connect_timeout +# This parameter specifies how long to wait for the TCP connect to +# the requested ICAP server to complete before giving up and either +# terminating the HTTP transaction or bypassing the failure. +# +# The default for optional services is peer_connect_timeout. +# The default for essential services is connect_timeout. +# If this option is explicitly set, its value applies to all services. +#Default: +# none + +# TAG: icap_io_timeout time-units +# This parameter specifies how long to wait for an I/O activity on +# an established, active ICAP connection before giving up and +# either terminating the HTTP transaction or bypassing the +# failure. +#Default: +# Use read_timeout. + +# TAG: icap_service_failure_limit limit [in memory-depth time-units] +# The limit specifies the number of failures that Squid tolerates +# when establishing a new TCP connection with an ICAP service. If +# the number of failures exceeds the limit, the ICAP service is +# not used for new ICAP requests until it is time to refresh its +# OPTIONS. +# +# A negative value disables the limit. Without the limit, an ICAP +# service will not be considered down due to connectivity failures +# between ICAP OPTIONS requests. +# +# Squid forgets ICAP service failures older than the specified +# value of memory-depth. The memory fading algorithm +# is approximate because Squid does not remember individual +# errors but groups them instead, splitting the option +# value into ten time slots of equal length. +# +# When memory-depth is 0 and by default this option has no +# effect on service failure expiration. +# +# Squid always forgets failures when updating service settings +# using an ICAP OPTIONS transaction, regardless of this option +# setting. +# +# For example, +# # suspend service usage after 10 failures in 5 seconds: +# icap_service_failure_limit 10 in 5 seconds +#Default: +# icap_service_failure_limit 10 + +# TAG: icap_service_revival_delay +# The delay specifies the number of seconds to wait after an ICAP +# OPTIONS request failure before requesting the options again. 
The +# failed ICAP service is considered "down" until fresh OPTIONS are +# fetched. +# +# The actual delay cannot be smaller than the hardcoded minimum +# delay of 30 seconds. +#Default: +# icap_service_revival_delay 180 + +# TAG: icap_preview_enable on|off +# The ICAP Preview feature allows the ICAP server to handle the +# HTTP message by looking only at the beginning of the message body +# or even without receiving the body at all. In some environments, +# previews greatly speedup ICAP processing. +# +# During an ICAP OPTIONS transaction, the server may tell Squid what +# HTTP messages should be previewed and how big the preview should be. +# Squid will not use Preview if the server did not request one. +# +# To disable ICAP Preview for all ICAP services, regardless of +# individual ICAP server OPTIONS responses, set this option to "off". +#Example: +#icap_preview_enable off +#Default: +# icap_preview_enable on + +# TAG: icap_preview_size +# The default size of preview data to be sent to the ICAP server. +# This value might be overwritten on a per server basis by OPTIONS requests. +#Default: +# No preview sent. + +# TAG: icap_206_enable on|off +# 206 (Partial Content) responses is an ICAP extension that allows the +# ICAP agents to optionally combine adapted and original HTTP message +# content. The decision to combine is postponed until the end of the +# ICAP response. Squid supports Partial Content extension by default. +# +# Activation of the Partial Content extension is negotiated with each +# ICAP service during OPTIONS exchange. Most ICAP servers should handle +# negotation correctly even if they do not support the extension, but +# some might fail. To disable Partial Content support for all ICAP +# services and to avoid any negotiation, set this option to "off". +# +# Example: +# icap_206_enable off +#Default: +# icap_206_enable on + +# TAG: icap_default_options_ttl +# The default TTL value for ICAP OPTIONS responses that don't have +# an Options-TTL header. +#Default: +# icap_default_options_ttl 60 + +# TAG: icap_persistent_connections on|off +# Whether or not Squid should use persistent connections to +# an ICAP server. +#Default: +# icap_persistent_connections on + +# TAG: adaptation_send_client_ip on|off +# If enabled, Squid shares HTTP client IP information with adaptation +# services. For ICAP, Squid adds the X-Client-IP header to ICAP requests. +# For eCAP, Squid sets the libecap::metaClientIp transaction option. +# +# See also: adaptation_uses_indirect_client +#Default: +# adaptation_send_client_ip off + +# TAG: adaptation_send_username on|off +# This sends authenticated HTTP client username (if available) to +# the adaptation service. +# +# For ICAP, the username value is encoded based on the +# icap_client_username_encode option and is sent using the header +# specified by the icap_client_username_header option. +#Default: +# adaptation_send_username off + +# TAG: icap_client_username_header +# ICAP request header name to use for adaptation_send_username. +#Default: +# icap_client_username_header X-Client-Username + +# TAG: icap_client_username_encode on|off +# Whether to base64 encode the authenticated client username. +#Default: +# icap_client_username_encode off + +# TAG: icap_service +# Defines a single ICAP service using the following format: +# +# icap_service id vectoring_point uri [option ...] +# +# id: ID +# an opaque identifier or name which is used to direct traffic to +# this specific service. Must be unique among all adaptation +# services in squid.conf. 
+# +# vectoring_point: reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache +# This specifies at which point of transaction processing the +# ICAP service should be activated. *_postcache vectoring points +# are not yet supported. +# +# uri: icap://servername:port/servicepath +# ICAP server and service location. +# +# ICAP does not allow a single service to handle both REQMOD and RESPMOD +# transactions. Squid does not enforce that requirement. You can specify +# services with the same service_url and different vectoring_points. You +# can even specify multiple identical services as long as their +# service_names differ. +# +# To activate a service, use the adaptation_access directive. To group +# services, use adaptation_service_chain and adaptation_service_set. +# +# Service options are separated by white space. ICAP services support +# the following name=value options: +# +# bypass=on|off|1|0 +# If set to 'on' or '1', the ICAP service is treated as +# optional. If the service cannot be reached or malfunctions, +# Squid will try to ignore any errors and process the message as +# if the service was not enabled. No all ICAP errors can be +# bypassed. If set to 0, the ICAP service is treated as +# essential and all ICAP errors will result in an error page +# returned to the HTTP client. +# +# Bypass is off by default: services are treated as essential. +# +# routing=on|off|1|0 +# If set to 'on' or '1', the ICAP service is allowed to +# dynamically change the current message adaptation plan by +# returning a chain of services to be used next. The services +# are specified using the X-Next-Services ICAP response header +# value, formatted as a comma-separated list of service names. +# Each named service should be configured in squid.conf. Other +# services are ignored. An empty X-Next-Services value results +# in an empty plan which ends the current adaptation. +# +# Dynamic adaptation plan may cross or cover multiple supported +# vectoring points in their natural processing order. +# +# Routing is not allowed by default: the ICAP X-Next-Services +# response header is ignored. +# +# ipv6=on|off +# Only has effect on split-stack systems. The default on those systems +# is to use IPv4-only connections. When set to 'on' this option will +# make Squid use IPv6-only connections to contact this ICAP service. +# +# on-overload=block|bypass|wait|force +# If the service Max-Connections limit has been reached, do +# one of the following for each new ICAP transaction: +# * block: send an HTTP error response to the client +# * bypass: ignore the "over-connected" ICAP service +# * wait: wait (in a FIFO queue) for an ICAP connection slot +# * force: proceed, ignoring the Max-Connections limit +# +# In SMP mode with N workers, each worker assumes the service +# connection limit is Max-Connections/N, even though not all +# workers may use a given service. +# +# The default value is "bypass" if service is bypassable, +# otherwise it is set to "wait". +# +# +# max-conn=number +# Use the given number as the Max-Connections limit, regardless +# of the Max-Connections value given by the service, if any. +# +# Older icap_service format without optional named parameters is +# deprecated but supported for backward compatibility. 
+# +#Example: +#icap_service svcBlocker reqmod_precache icap://icap1.mydomain.net:1344/reqmod bypass=0 +#icap_service svcLogger reqmod_precache icap://icap2.mydomain.net:1344/respmod routing=on +#Default: +# none + +# TAG: icap_class +# This deprecated option was documented to define an ICAP service +# chain, even though it actually defined a set of similar, redundant +# services, and the chains were not supported. +# +# To define a set of redundant services, please use the +# adaptation_service_set directive. For service chains, use +# adaptation_service_chain. +#Default: +# none + +# TAG: icap_access +# This option is deprecated. Please use adaptation_access, which +# has the same ICAP functionality, but comes with better +# documentation, and eCAP support. +#Default: +# none + +# eCAP OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: ecap_enable on|off +# Controls whether eCAP support is enabled. +#Default: +# ecap_enable off + +# TAG: ecap_service +# Defines a single eCAP service +# +# ecap_service id vectoring_point uri [option ...] +# +# id: ID +# an opaque identifier or name which is used to direct traffic to +# this specific service. Must be unique among all adaptation +# services in squid.conf. +# +# vectoring_point: reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache +# This specifies at which point of transaction processing the +# eCAP service should be activated. *_postcache vectoring points +# are not yet supported. +# +# uri: ecap://vendor/service_name?custom&cgi=style¶meters=optional +# Squid uses the eCAP service URI to match this configuration +# line with one of the dynamically loaded services. Each loaded +# eCAP service must have a unique URI. Obtain the right URI from +# the service provider. +# +# To activate a service, use the adaptation_access directive. To group +# services, use adaptation_service_chain and adaptation_service_set. +# +# Service options are separated by white space. eCAP services support +# the following name=value options: +# +# bypass=on|off|1|0 +# If set to 'on' or '1', the eCAP service is treated as optional. +# If the service cannot be reached or malfunctions, Squid will try +# to ignore any errors and process the message as if the service +# was not enabled. No all eCAP errors can be bypassed. +# If set to 'off' or '0', the eCAP service is treated as essential +# and all eCAP errors will result in an error page returned to the +# HTTP client. +# +# Bypass is off by default: services are treated as essential. +# +# routing=on|off|1|0 +# If set to 'on' or '1', the eCAP service is allowed to +# dynamically change the current message adaptation plan by +# returning a chain of services to be used next. +# +# Dynamic adaptation plan may cross or cover multiple supported +# vectoring points in their natural processing order. +# +# Routing is not allowed by default. +# +# Older ecap_service format without optional named parameters is +# deprecated but supported for backward compatibility. +# +# +#Example: +#ecap_service s1 reqmod_precache ecap://filters.R.us/leakDetector?on_error=block bypass=off +#ecap_service s2 respmod_precache ecap://filters.R.us/virusFilter config=/etc/vf.cfg bypass=on +#Default: +# none + +# TAG: loadable_modules +# Instructs Squid to load the specified dynamic module(s) or activate +# preloaded module(s). 
+#Example: +#loadable_modules /usr/lib/MinimalAdapter.so +#Default: +# none + +# MESSAGE ADAPTATION OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: adaptation_service_set +# +# Configures an ordered set of similar, redundant services. This is +# useful when hot standby or backup adaptation servers are available. +# +# adaptation_service_set set_name service_name1 service_name2 ... +# +# The named services are used in the set declaration order. The first +# applicable adaptation service from the set is used first. The next +# applicable service is tried if and only if the transaction with the +# previous service fails and the message waiting to be adapted is still +# intact. +# +# When adaptation starts, broken services are ignored as if they were +# not a part of the set. A broken service is a down optional service. +# +# The services in a set must be attached to the same vectoring point +# (e.g., pre-cache) and use the same adaptation method (e.g., REQMOD). +# +# If all services in a set are optional then adaptation failures are +# bypassable. If all services in the set are essential, then a +# transaction failure with one service may still be retried using +# another service from the set, but when all services fail, the master +# transaction fails as well. +# +# A set may contain a mix of optional and essential services, but that +# is likely to lead to surprising results because broken services become +# ignored (see above), making previously bypassable failures fatal. +# Technically, it is the bypassability of the last failed service that +# matters. +# +# See also: adaptation_access adaptation_service_chain +# +#Example: +#adaptation_service_set svcBlocker urlFilterPrimary urlFilterBackup +#adaptation service_set svcLogger loggerLocal loggerRemote +#Default: +# none + +# TAG: adaptation_service_chain +# +# Configures a list of complementary services that will be applied +# one-by-one, forming an adaptation chain or pipeline. This is useful +# when Squid must perform different adaptations on the same message. +# +# adaptation_service_chain chain_name service_name1 svc_name2 ... +# +# The named services are used in the chain declaration order. The first +# applicable adaptation service from the chain is used first. The next +# applicable service is applied to the successful adaptation results of +# the previous service in the chain. +# +# When adaptation starts, broken services are ignored as if they were +# not a part of the chain. A broken service is a down optional service. +# +# Request satisfaction terminates the adaptation chain because Squid +# does not currently allow declaration of RESPMOD services at the +# "reqmod_precache" vectoring point (see icap_service or ecap_service). +# +# The services in a chain must be attached to the same vectoring point +# (e.g., pre-cache) and use the same adaptation method (e.g., REQMOD). +# +# A chain may contain a mix of optional and essential services. If an +# essential adaptation fails (or the failure cannot be bypassed for +# other reasons), the master transaction fails. Otherwise, the failure +# is bypassed as if the failed adaptation service was not in the chain. +# +# See also: adaptation_access adaptation_service_set +# +#Example: +#adaptation_service_chain svcRequest requestLogger urlFilter leakDetector +#Default: +# none + +# TAG: adaptation_access +# Sends an HTTP transaction to an ICAP or eCAP adaptation service. +# +# adaptation_access service_name allow|deny [!]aclname... 
+# adaptation_access set_name allow|deny [!]aclname... +# +# At each supported vectoring point, the adaptation_access +# statements are processed in the order they appear in this +# configuration file. Statements pointing to the following services +# are ignored (i.e., skipped without checking their ACL): +# +# - services serving different vectoring points +# - "broken-but-bypassable" services +# - "up" services configured to ignore such transactions +# (e.g., based on the ICAP Transfer-Ignore header). +# +# When a set_name is used, all services in the set are checked +# using the same rules, to find the first applicable one. See +# adaptation_service_set for details. +# +# If an access list is checked and there is a match, the +# processing stops: For an "allow" rule, the corresponding +# adaptation service is used for the transaction. For a "deny" +# rule, no adaptation service is activated. +# +# It is currently not possible to apply more than one adaptation +# service at the same vectoring point to the same HTTP transaction. +# +# See also: icap_service and ecap_service +# +#Example: +#adaptation_access service_1 allow all +#Default: +# Allow, unless rules exist in squid.conf. + +# TAG: adaptation_service_iteration_limit +# Limits the number of iterations allowed when applying adaptation +# services to a message. If your longest adaptation set or chain +# may have more than 16 services, increase the limit beyond its +# default value of 16. If detecting infinite iteration loops sooner +# is critical, make the iteration limit match the actual number +# of services in your longest adaptation set or chain. +# +# Infinite adaptation loops are most likely with routing services. +# +# See also: icap_service routing=1 +#Default: +# adaptation_service_iteration_limit 16 + +# TAG: adaptation_masterx_shared_names +# For each master transaction (i.e., the HTTP request and response +# sequence, including all related ICAP and eCAP exchanges), Squid +# maintains a table of metadata. The table entries are (name, value) +# pairs shared among eCAP and ICAP exchanges. The table is destroyed +# with the master transaction. +# +# This option specifies the table entry names that Squid must accept +# from and forward to the adaptation transactions. +# +# An ICAP REQMOD or RESPMOD transaction may set an entry in the +# shared table by returning an ICAP header field with a name +# specified in adaptation_masterx_shared_names. +# +# An eCAP REQMOD or RESPMOD transaction may set an entry in the +# shared table by implementing the libecap::visitEachOption() API +# to provide an option with a name specified in +# adaptation_masterx_shared_names. +# +# Squid will store and forward the set entry to subsequent adaptation +# transactions within the same master transaction scope. +# +# Only one shared entry name is supported at this time. +# +#Example: +## share authentication information among ICAP services +#adaptation_masterx_shared_names X-Subscriber-ID +#Default: +# none + +# TAG: adaptation_meta +# This option allows Squid administrator to add custom ICAP request +# headers or eCAP options to Squid ICAP requests or eCAP transactions. +# Use it to pass custom authentication tokens and other +# transaction-state related meta information to an ICAP/eCAP service. +# +# The addition of a meta header is ACL-driven: +# adaptation_meta name value [!]aclname ... +# +# Processing for a given header name stops after the first ACL list match. +# Thus, it is impossible to add two headers with the same name. 
If no ACL +# lists match for a given header name, no such header is added. For +# example: +# +# # do not debug transactions except for those that need debugging +# adaptation_meta X-Debug 1 needs_debugging +# +# # log all transactions except for those that must remain secret +# adaptation_meta X-Log 1 !keep_secret +# +# # mark transactions from users in the "G 1" group +# adaptation_meta X-Authenticated-Groups "G 1" authed_as_G1 +# +# The "value" parameter may be a regular squid.conf token or a "double +# quoted string". Within the quoted string, use backslash (\) to escape +# any character, which is currently only useful for escaping backslashes +# and double quotes. For example, +# "this string has one backslash (\\) and two \"quotes\"" +# +# Used adaptation_meta header values may be logged via %note +# logformat code. If multiple adaptation_meta headers with the same name +# are used during master transaction lifetime, the header values are +# logged in the order they were used and duplicate values are ignored +# (only the first repeated value will be logged). +#Default: +# none + +# TAG: icap_retry +# This ACL determines which retriable ICAP transactions are +# retried. Transactions that received a complete ICAP response +# and did not have to consume or produce HTTP bodies to receive +# that response are usually retriable. +# +# icap_retry allow|deny [!]aclname ... +# +# Squid automatically retries some ICAP I/O timeouts and errors +# due to persistent connection race conditions. +# +# See also: icap_retry_limit +#Default: +# icap_retry deny all + +# TAG: icap_retry_limit +# Limits the number of retries allowed. +# +# Communication errors due to persistent connection race +# conditions are unavoidable, automatically retried, and do not +# count against this limit. +# +# See also: icap_retry +#Default: +# No retries are allowed. + +# DNS OPTIONS +# ----------------------------------------------------------------------------- + +# TAG: check_hostnames +# For security and stability reasons Squid can check +# hostnames for Internet standard RFC compliance. If you want +# Squid to perform these checks turn this directive on. +#Default: +# check_hostnames off + +# TAG: allow_underscore +# Underscore characters is not strictly allowed in Internet hostnames +# but nevertheless used by many sites. Set this to off if you want +# Squid to be strict about the standard. +# This check is performed only when check_hostnames is set to on. +#Default: +# allow_underscore on + +# TAG: dns_retransmit_interval +# Initial retransmit interval for DNS queries. The interval is +# doubled each time all configured DNS servers have been tried. +#Default: +# dns_retransmit_interval 5 seconds + +# TAG: dns_timeout +# DNS Query timeout. If no response is received to a DNS query +# within this time all DNS servers for the queried domain +# are assumed to be unavailable. +#Default: +# dns_timeout 30 seconds + +# TAG: dns_packet_max +# Maximum number of bytes packet size to advertise via EDNS. +# Set to "none" to disable EDNS large packet support. +# +# For legacy reasons DNS UDP replies will default to 512 bytes which +# is too small for many responses. EDNS provides a means for Squid to +# negotiate receiving larger responses back immediately without having +# to failover with repeat requests. Responses larger than this limit +# will retain the old behaviour of failover to TCP DNS. 
+# +# Squid has no real fixed limit internally, but allowing packet sizes +# over 1500 bytes requires network jumbogram support and is usually not +# necessary. +# +# WARNING: The RFC also indicates that some older resolvers will reply +# with failure of the whole request if the extension is added. Some +# resolvers have already been identified which will reply with mangled +# EDNS response on occasion. Usually in response to many-KB jumbogram +# sizes being advertised by Squid. +# Squid will currently treat these both as an unable-to-resolve domain +# even if it would be resolvable without EDNS. +#Default: +# EDNS disabled + +# TAG: dns_defnames on|off +# Normally the RES_DEFNAMES resolver option is disabled +# (see res_init(3)). This prevents caches in a hierarchy +# from interpreting single-component hostnames locally. To allow +# Squid to handle single-component names, enable this option. +#Default: +# Search for single-label domain names is disabled. + +# TAG: dns_multicast_local on|off +# When set to on, Squid sends multicast DNS lookups on the local +# network for domains ending in .local and .arpa. +# This enables local servers and devices to be contacted in an +# ad-hoc or zero-configuration network environment. +#Default: +# Search for .local and .arpa names is disabled. + +# TAG: dns_nameservers +# Use this if you want to specify a list of DNS name servers +# (IP addresses) to use instead of those given in your +# /etc/resolv.conf file. +# +# On Windows platforms, if no value is specified here or in +# the /etc/resolv.conf file, the list of DNS name servers are +# taken from the Windows registry, both static and dynamic DHCP +# configurations are supported. +# +# Example: dns_nameservers 10.0.0.1 192.172.0.4 +#Default: +# Use operating system definitions + +# TAG: hosts_file +# Location of the host-local IP name-address associations +# database. Most Operating Systems have such a file on different +# default locations: +# - Un*X & Linux: /etc/hosts +# - Windows NT/2000: %SystemRoot%\system32\drivers\etc\hosts +# (%SystemRoot% value install default is c:\winnt) +# - Windows XP/2003: %SystemRoot%\system32\drivers\etc\hosts +# (%SystemRoot% value install default is c:\windows) +# - Windows 9x/Me: %windir%\hosts +# (%windir% value is usually c:\windows) +# - Cygwin: /etc/hosts +# +# The file contains newline-separated definitions, in the +# form ip_address_in_dotted_form name [name ...] names are +# whitespace-separated. Lines beginning with an hash (#) +# character are comments. +# +# The file is checked at startup and upon configuration. +# If set to 'none', it won't be checked. +# If append_domain is used, that domain will be added to +# domain-local (i.e. not containing any dot character) host +# definitions. +#Default: +# hosts_file /etc/hosts + +# TAG: append_domain +# Appends local domain name to hostnames without any dots in +# them. append_domain must begin with a period. +# +# Be warned there are now Internet names with no dots in +# them using only top-domain names, so setting this may +# cause some Internet sites to become unavailable. +# +#Example: +# append_domain .yourdomain.com +#Default: +# Use operating system definitions + +# TAG: ignore_unknown_nameservers +# By default Squid checks that DNS responses are received +# from the same IP addresses they are sent to. If they +# don't match, Squid ignores the response and writes a warning +# message to cache.log. You can allow responses from unknown +# nameservers by setting this option to 'off'. 
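+#
+#	Example (only relax this check if you know your resolver
+#	legitimately replies from a different address, e.g. certain
+#	anycast or multi-homed DNS setups):
+#
+#	ignore_unknown_nameservers off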
+#Default: +# ignore_unknown_nameservers on + +# TAG: dns_v4_first +# With the IPv6 Internet being as fast or faster than IPv4 Internet +# for most networks Squid prefers to contact websites over IPv6. +# +# This option reverses the order of preference to make Squid contact +# dual-stack websites over IPv4 first. Squid will still perform both +# IPv6 and IPv4 DNS lookups before connecting. +# +# WARNING: +# This option will restrict the situations under which IPv6 +# connectivity is used (and tested), potentially hiding network +# problems which would otherwise be detected and warned about. +#Default: +# dns_v4_first off + +# TAG: ipcache_size (number of entries) +# Maximum number of DNS IP cache entries. +#Default: +# ipcache_size 1024 + +# TAG: ipcache_low (percent) +#Default: +# ipcache_low 90 + +# TAG: ipcache_high (percent) +# The size, low-, and high-water marks for the IP cache. +#Default: +# ipcache_high 95 + +# TAG: fqdncache_size (number of entries) +# Maximum number of FQDN cache entries. +#Default: +# fqdncache_size 1024 + +# MISCELLANEOUS +# ----------------------------------------------------------------------------- + +# TAG: configuration_includes_quoted_values on|off +# If set, Squid will recognize each "quoted string" after a configuration +# directive as a single parameter. The quotes are stripped before the +# parameter value is interpreted or used. +# See "Values with spaces, quotes, and other special characters" +# section for more details. +#Default: +# configuration_includes_quoted_values off + +# TAG: memory_pools on|off +# If set, Squid will keep pools of allocated (but unused) memory +# available for future use. If memory is a premium on your +# system and you believe your malloc library outperforms Squid +# routines, disable this. +#Default: +# memory_pools on + +# TAG: memory_pools_limit (bytes) +# Used only with memory_pools on: +# memory_pools_limit 50 MB +# +# If set to a non-zero value, Squid will keep at most the specified +# limit of allocated (but unused) memory in memory pools. All free() +# requests that exceed this limit will be handled by your malloc +# library. Squid does not pre-allocate any memory, just safe-keeps +# objects that otherwise would be free()d. Thus, it is safe to set +# memory_pools_limit to a reasonably high value even if your +# configuration will use less memory. +# +# If set to none, Squid will keep all memory it can. That is, there +# will be no limit on the total amount of memory used for safe-keeping. +# +# To disable memory allocation optimization, do not set +# memory_pools_limit to 0 or none. Set memory_pools to "off" instead. +# +# An overhead for maintaining memory pools is not taken into account +# when the limit is checked. This overhead is close to four bytes per +# object kept. However, pools may actually _save_ memory because of +# reduced memory thrashing in your malloc library. +#Default: +# memory_pools_limit 5 MB + +# TAG: forwarded_for on|off|transparent|truncate|delete +# If set to "on", Squid will append your client's IP address +# in the HTTP requests it forwards. By default it looks like: +# +# X-Forwarded-For: 192.1.2.3 +# +# If set to "off", it will appear as +# +# X-Forwarded-For: unknown +# +# If set to "transparent", Squid will not alter the +# X-Forwarded-For header in any way. +# +# If set to "delete", Squid will delete the entire +# X-Forwarded-For header. +# +# If set to "truncate", Squid will remove all existing +# X-Forwarded-For entries, and place the client IP as the sole entry. 
+#Default: +# forwarded_for on + +# TAG: cachemgr_passwd +# Specify passwords for cachemgr operations. +# +# Usage: cachemgr_passwd password action action ... +# +# Some valid actions are (see cache manager menu for a full list): +# 5min +# 60min +# asndb +# authenticator +# cbdata +# client_list +# comm_incoming +# config * +# counters +# delay +# digest_stats +# dns +# events +# filedescriptors +# fqdncache +# histograms +# http_headers +# info +# io +# ipcache +# mem +# menu +# netdb +# non_peers +# objects +# offline_toggle * +# pconn +# peer_select +# reconfigure * +# redirector +# refresh +# server_list +# shutdown * +# store_digest +# storedir +# utilization +# via_headers +# vm_objects +# +# * Indicates actions which will not be performed without a +# valid password, others can be performed if not listed here. +# +# To disable an action, set the password to "disable". +# To allow performing an action without a password, set the +# password to "none". +# +# Use the keyword "all" to set the same password for all actions. +# +#Example: +# cachemgr_passwd secret shutdown +# cachemgr_passwd lesssssssecret info stats/objects +# cachemgr_passwd disable all +#Default: +# No password. Actions which require password are denied. + +# TAG: client_db on|off +# If you want to disable collecting per-client statistics, +# turn off client_db here. +#Default: +# client_db on + +# TAG: refresh_all_ims on|off +# When you enable this option, squid will always check +# the origin server for an update when a client sends an +# If-Modified-Since request. Many browsers use IMS +# requests when the user requests a reload, and this +# ensures those clients receive the latest version. +# +# By default (off), squid may return a Not Modified response +# based on the age of the cached version. +#Default: +# refresh_all_ims off + +# TAG: reload_into_ims on|off +# When you enable this option, client no-cache or ``reload'' +# requests will be changed to If-Modified-Since requests. +# Doing this VIOLATES the HTTP standard. Enabling this +# feature could make you liable for problems which it +# causes. +# +# see also refresh_pattern for a more selective approach. +#Default: +# reload_into_ims off + +# TAG: connect_retries +# This sets the maximum number of connection attempts made for each +# TCP connection. The connect_retries attempts must all still +# complete within the connection timeout period. +# +# The default is not to re-try if the first connection attempt fails. +# The (not recommended) maximum is 10 tries. +# +# A warning message will be generated if it is set to a too-high +# value and the configured value will be over-ridden. +# +# Note: These re-tries are in addition to forward_max_tries +# which limit how many different addresses may be tried to find +# a useful server. +#Default: +# Do not retry failed connections. + +# TAG: retry_on_error +# If set to ON Squid will automatically retry requests when +# receiving an error response with status 403 (Forbidden), +# 500 (Internal Error), 501 or 503 (Service not available). +# Status 502 and 504 (Gateway errors) are always retried. +# +# This is mainly useful if you are in a complex cache hierarchy to +# work around access control errors. +# +# NOTE: This retry will attempt to find another working destination. +# Which is different from the server which just failed. +#Default: +# retry_on_error off + +# TAG: as_whois_server +# WHOIS server to query for AS numbers. NOTE: AS numbers are +# queried only when Squid starts up, not for every request. 
+#Default: +# as_whois_server whois.ra.net + +# TAG: offline_mode +# Enable this option and Squid will never try to validate cached +# objects. +#Default: +# offline_mode off + +# TAG: uri_whitespace +# What to do with requests that have whitespace characters in the +# URI. Options: +# +# strip: The whitespace characters are stripped out of the URL. +# This is the behavior recommended by RFC2396 and RFC3986 +# for tolerant handling of generic URI. +# NOTE: This is one difference between generic URI and HTTP URLs. +# +# deny: The request is denied. The user receives an "Invalid +# Request" message. +# This is the behaviour recommended by RFC2616 for safe +# handling of HTTP request URL. +# +# allow: The request is allowed and the URI is not changed. The +# whitespace characters remain in the URI. Note the +# whitespace is passed to redirector processes if they +# are in use. +# Note this may be considered a violation of RFC2616 +# request parsing where whitespace is prohibited in the +# URL field. +# +# encode: The request is allowed and the whitespace characters are +# encoded according to RFC1738. +# +# chop: The request is allowed and the URI is chopped at the +# first whitespace. +# +# +# NOTE the current Squid implementation of encode and chop violates +# RFC2616 by not using a 301 redirect after altering the URL. +#Default: +# uri_whitespace strip + +# TAG: chroot +# Specifies a directory where Squid should do a chroot() while +# initializing. This also causes Squid to fully drop root +# privileges after initializing. This means, for example, if you +# use a HTTP port less than 1024 and try to reconfigure, you may +# get an error saying that Squid can not open the port. +#Default: +# none + +# TAG: balance_on_multiple_ip +# Modern IP resolvers in squid sort lookup results by preferred access. +# By default squid will use these IP in order and only rotates to +# the next listed when the most preffered fails. +# +# Some load balancing servers based on round robin DNS have been +# found not to preserve user session state across requests +# to different IP addresses. +# +# Enabling this directive Squid rotates IP's per request. +#Default: +# balance_on_multiple_ip off + +# TAG: pipeline_prefetch +# HTTP clients may send a pipeline of 1+N requests to Squid using a +# single connection, without waiting for Squid to respond to the first +# of those requests. This option limits the number of concurrent +# requests Squid will try to handle in parallel. If set to N, Squid +# will try to receive and process up to 1+N requests on the same +# connection concurrently. +# +# Defaults to 0 (off) for bandwidth management and access logging +# reasons. +# +# NOTE: pipelining requires persistent connections to clients. +# +# WARNING: pipelining breaks NTLM and Negotiate/Kerberos authentication. +#Default: +# Do not pre-parse pipelined requests. + +# TAG: high_response_time_warning (msec) +# If the one-minute median response time exceeds this value, +# Squid prints a WARNING with debug level 0 to get the +# administrators attention. The value is in milliseconds. +#Default: +# disabled. + +# TAG: high_page_fault_warning +# If the one-minute average page fault rate exceeds this +# value, Squid prints a WARNING with debug level 0 to get +# the administrators attention. The value is in page faults +# per second. +#Default: +# disabled. 
+ +# TAG: high_memory_warning +# Note: This option is only available if Squid is rebuilt with the +# GNU Malloc with mstats() +# +# If the memory usage (as determined by gnumalloc, if available and used) +# exceeds this amount, Squid prints a WARNING with debug level 0 to get +# the administrators attention. +#Default: +# disabled. + +# TAG: sleep_after_fork (microseconds) +# When this is set to a non-zero value, the main Squid process +# sleeps the specified number of microseconds after a fork() +# system call. This sleep may help the situation where your +# system reports fork() failures due to lack of (virtual) +# memory. Note, however, if you have a lot of child +# processes, these sleep delays will add up and your +# Squid will not service requests for some amount of time +# until all the child processes have been started. +# On Windows value less then 1000 (1 milliseconds) are +# rounded to 1000. +#Default: +# sleep_after_fork 0 + +# TAG: windows_ipaddrchangemonitor on|off +# Note: This option is only available if Squid is rebuilt with the +# MS Windows +# +# On Windows Squid by default will monitor IP address changes and will +# reconfigure itself after any detected event. This is very useful for +# proxies connected to internet with dial-up interfaces. +# In some cases (a Proxy server acting as VPN gateway is one) it could be +# desiderable to disable this behaviour setting this to 'off'. +# Note: after changing this, Squid service must be restarted. +#Default: +# windows_ipaddrchangemonitor on + +# TAG: eui_lookup +# Whether to lookup the EUI or MAC address of a connected client. +#Default: +# eui_lookup on + +# TAG: max_filedescriptors +# Reduce the maximum number of filedescriptors supported below +# the usual operating system defaults. +# +# Remove from squid.conf to inherit the current ulimit setting. +# +# Note: Changing this requires a restart of Squid. Also +# not all I/O types supports large values (eg on Windows). +#Default: +# Use operating system limits set by ulimit. -- cgit v0.10-9-g596f From 9966ebcd8494083a1b87e7d368bedfffb5e5fe59 Mon Sep 17 00:00:00 2001 From: Tobias Wolf Date: Fri, 9 Nov 2018 23:48:56 +0100 Subject: Add new flag --show-body/-B to print body This should help with figuring out ia problem at a glance when enabled for healthz endpoints on web APIs, for example. The content of the body can point to what the problem is and help with diagnostics. 
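As an illustrative sketch only (it is not part of the patch), the new flag can be exercised through the project's own Perl test harness. NP_HOST_TCP_HTTP is a parameter the test suite already defines; the /healthz path and the expectation that the endpoint returns a useful body are assumptions made for the example.

    use strict;
    use warnings;
    use NPTest;

    # Any web server whose status page returns a body will do here; the
    # "/healthz" path is only a placeholder for a typical health endpoint.
    my $host = getTestParameter("NP_HOST_TCP_HTTP",
        "A host providing the HTTP Service (a web server)", "localhost");

    # Without -B only the status line is printed; with -B / --show-body the
    # response body is printed below the status line, so a failing health
    # endpoint can explain itself directly in the plugin output.
    my $res = NPTest->testCmd("./check_http -H $host -u /healthz -B");
    print "return code: ", $res->return_code, "\n";
    print $res->output, "\n";
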
Fixes #1559 diff --git a/plugins/check_http.c b/plugins/check_http.c index 1e2a54c..856e1e9 100644 --- a/plugins/check_http.c +++ b/plugins/check_http.c @@ -120,6 +120,7 @@ int use_ssl = FALSE; int use_sni = FALSE; int verbose = FALSE; int show_extended_perfdata = FALSE; +int show_body = FALSE; int sd; int min_page_len = 0; int max_page_len = 0; @@ -240,6 +241,7 @@ process_arguments (int argc, char **argv) {"use-ipv4", no_argument, 0, '4'}, {"use-ipv6", no_argument, 0, '6'}, {"extended-perfdata", no_argument, 0, 'E'}, + {"show-body", no_argument, 0, 'B'}, {0, 0, 0, 0} }; @@ -260,7 +262,7 @@ process_arguments (int argc, char **argv) } while (1) { - c = getopt_long (argc, argv, "Vvh46t:c:w:A:k:H:P:j:T:I:a:b:d:e:p:s:R:r:u:f:C:J:K:nlLS::m:M:NE", longopts, &option); + c = getopt_long (argc, argv, "Vvh46t:c:w:A:k:H:P:j:T:I:a:b:d:e:p:s:R:r:u:f:C:J:K:nlLS::m:M:NEB", longopts, &option); if (c == -1 || c == EOF) break; @@ -547,6 +549,9 @@ process_arguments (int argc, char **argv) case 'E': /* show extended perfdata */ show_extended_perfdata = TRUE; break; + case 'B': /* print body content after status line */ + show_body = TRUE; + break; } } @@ -1300,6 +1305,9 @@ check_http (void) perfd_time (elapsed_time), perfd_size (page_len)); + if (show_body) + xasprintf (&msg, _("%s\n%s"), msg, page); + result = max_state_alt(get_status(elapsed_time, thlds), result); die (result, "HTTP %s: %s\n", state_text(result), msg); @@ -1621,6 +1629,8 @@ print_help (void) printf (" %s\n", _("Any other tags to be sent in http header. Use multiple times for additional headers")); printf (" %s\n", "-E, --extended-perfdata"); printf (" %s\n", _("Print additional performance data")); + printf (" %s\n", "-B, --show-body"); + printf (" %s\n", _("Print body content below status line")); printf (" %s\n", "-L, --link"); printf (" %s\n", _("Wrap output in HTML link (obsoleted by urlize)")); printf (" %s\n", "-f, --onredirect="); -- cgit v0.10-9-g596f From 910894a4e255cdb88e37f17a06f1393853544b84 Mon Sep 17 00:00:00 2001 From: Kostyantyn Hushchyn Date: Tue, 14 Dec 2010 12:51:25 +0200 Subject: check_cluster.c: Added data argument validation. 
Signed-off-by: Kostyantyn Hushchyn diff --git a/plugins/check_cluster.c b/plugins/check_cluster.c index b86e501..e1ede9f 100644 --- a/plugins/check_cluster.c +++ b/plugins/check_cluster.c @@ -143,6 +143,7 @@ int main(int argc, char **argv){ int process_arguments(int argc, char **argv){ int c; + char *ptr; int option=0; static struct option longopts[]={ {"data", required_argument,0,'d'}, @@ -188,6 +189,15 @@ int process_arguments(int argc, char **argv){ case 'd': /* data values */ data_vals=(char *)strdup(optarg); + /* validate data */ + for (ptr=data_vals;ptr!=NULL;ptr+=2){ + if (ptr[0]<'0' || ptr[0]>'3') + return ERROR; + if (ptr[1]=='\0') + break; + if (ptr[1]!=',') + return ERROR; + } break; case 'l': /* text label */ -- cgit v0.10-9-g596f From 19be129ff8986511e82fb2c628c04c28a080210b Mon Sep 17 00:00:00 2001 From: Sven Nierlein Date: Tue, 4 Dec 2018 09:48:45 +0100 Subject: tests: enable plugins-root tests diff --git a/Makefile.am b/Makefile.am index 7e0d413..df1bcbb 100644 --- a/Makefile.am +++ b/Makefile.am @@ -34,6 +34,7 @@ test test-debug: if test "$(PERLMODS_DIR)" != ""; then cd perlmods && $(MAKE) $@; fi cd plugins && $(MAKE) $@ cd plugins-scripts && $(MAKE) $@ + cd plugins-root && $(MAKE) $@ # Solaris pkgmk BUILDDIR = build-solaris -- cgit v0.10-9-g596f From 660e0cf4e2968c97adeabf9ba91a54f1bd4ad240 Mon Sep 17 00:00:00 2001 From: Sven Nierlein Date: Tue, 4 Dec 2018 14:08:21 +0100 Subject: tests: skip some tests if Monitoring::Plugin::Range isn't available The check_file_age uses Monitoring::Plugin::Range internally. Skip thoses tests if the module isn't available. diff --git a/plugins-scripts/t/check_file_age.t b/plugins-scripts/t/check_file_age.t index ebf673f..8b87670 100644 --- a/plugins-scripts/t/check_file_age.t +++ b/plugins-scripts/t/check_file_age.t @@ -20,105 +20,74 @@ my $temp_link = "/tmp/check_file_age.link.tmp"; unlink $temp_file, $temp_link; -$result = NPTest->testCmd( - "./check_file_age" - ); +$result = NPTest->testCmd("./check_file_age"); cmp_ok( $result->return_code, '==', 3, "Missing parameters" ); like ( $result->output, $unknownOutput, "Output for unknown correct" ); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file"); cmp_ok( $result->return_code, '==', 2, "File not exists" ); like ( $result->output, $criticalOutput, "Output for file missing correct" ); write_chars(100); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file"); cmp_ok( $result->return_code, '==', 0, "File is new enough" ); like ( $result->output, $successOutput, "Output for success correct" ); sleep 2; -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -w 1" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file -w 1"); cmp_ok( $result->return_code, '==', 1, "Warning for file over 1 second old" ); like ( $result->output, $warningOutput, "Output for warning correct" ); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -w 0:1" - ); -cmp_ok( $result->return_code, '==', 1, "Warning for file over 1 second old by range" ); -like ( $result->output, $warningOutput, "Output for warning by range correct" ); - -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1"); cmp_ok( $result->return_code, '==', 2, "Critical for file over 1 second old" ); like ( $result->output, $criticalOutput, "Output for critical correct" ); -$result = NPTest->testCmd( 
- "./check_file_age -f $temp_file -c 0:1" - ); -cmp_ok( $result->return_code, '==', 2, "Critical for file over 1 second old by range" ); -like ( $result->output, $criticalOutput, "Output for critical by range correct" ); - -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1000 -W 100" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 100"); cmp_ok( $result->return_code, '==', 0, "Checking file size" ); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 0:1000 -W 0:100" - ); -cmp_ok( $result->return_code, '==', 0, "Checking file size by range" ); - -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1000 -W 100" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 100"); like( $result->output, $performanceOutput, "Checking for performance Output" ); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1000 -W 100" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 100"); like( $result->output, $performanceOutput, "Checking for performance Output from range" ); -$result = NPTest->testCmd( - "./check_file_age -f /non/existent --ignore-missing" - ); +$result = NPTest->testCmd("./check_file_age -f /non/existent --ignore-missing"); cmp_ok( $result->return_code, '==', 0, "Honours --ignore-missing" ); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1000 -W 101" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 101"); cmp_ok( $result->return_code, '==', 1, "One byte too short" ); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1000 -W 101:" - ); -cmp_ok( $result->return_code, '==', 1, "One byte too short by range" ); - -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1000 -C 101" - ); +$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -C 101"); cmp_ok( $result->return_code, '==', 2, "One byte too short - critical" ); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1000 -C 101:" - ); -cmp_ok( $result->return_code, '==', 2, "One byte too short by range - critical" ); +SKIP: { + eval 'use Monitoring::Plugin::Range'; + skip "Monitoring::Plugin::Range module require", 9 if $@; + + $result = NPTest->testCmd("./check_file_age -f $temp_file -w 0:1"); + cmp_ok( $result->return_code, '==', 1, "Warning for file over 1 second old by range" ); + like ( $result->output, $warningOutput, "Output for warning by range correct" ); + + $result = NPTest->testCmd("./check_file_age -f $temp_file -c 0:1"); + cmp_ok( $result->return_code, '==', 2, "Critical for file over 1 second old by range" ); + like ( $result->output, $criticalOutput, "Output for critical by range correct" ); + + $result = NPTest->testCmd("./check_file_age -f $temp_file -c 0:1000 -W 0:100"); + cmp_ok( $result->return_code, '==', 0, "Checking file size by range" ); + + $result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 101:"); + cmp_ok( $result->return_code, '==', 1, "One byte too short by range" ); + + $result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 0:99"); + cmp_ok( $result->return_code, '==', 1, "One byte too long by range" ); -$result = NPTest->testCmd( - "./check_file_age -f $temp_file -c 1000 -W 0:99" - ); -cmp_ok( $result->return_code, '==', 1, "One byte too long by range" ); + $result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -C 101:"); + cmp_ok( $result->return_code, '==', 2, "One byte too short by range - critical" ); -$result = NPTest->testCmd( - 
"./check_file_age -f $temp_file -c 1000 -C 0:99" - ); -cmp_ok( $result->return_code, '==', 2, "One byte too long by range - critical" ); + $result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -C 0:99"); + cmp_ok( $result->return_code, '==', 2, "One byte too long by range - critical" ); +}; symlink $temp_file, $temp_link or die "Cannot create symlink"; $result = NPTest->testCmd("./check_file_age -f $temp_link -c 10"); -- cgit v0.10-9-g596f From b69eb5afed40f14ba648c3358e2b46aeb75b0c5d Mon Sep 17 00:00:00 2001 From: Sven Nierlein Date: Tue, 4 Dec 2018 09:50:25 +0100 Subject: NPTest: unify whitespace diff --git a/NPTest.pm b/NPTest.pm index f72ed2d..2248e8e 100644 --- a/NPTest.pm +++ b/NPTest.pm @@ -53,8 +53,8 @@ developer to interactively request test parameter information from the user. The user can accept the developer's default value or reply "none" which will then be returned as "" for the test to skip if appropriate. -If a parameter needs to be entered and the test is run without a tty -attached (such as a cronjob), the parameter will be assigned as if it +If a parameter needs to be entered and the test is run without a tty +attached (such as a cronjob), the parameter will be assigned as if it was "none". Tests can check for the parameter and skip if not set. Responses are stored in an external, file-based cache so subsequent test @@ -249,26 +249,26 @@ sub checkCmd { if ( scalar( grep { $_ == $exitStatus } @{$desiredExitStatus} ) ) { - $desiredExitStatus = $exitStatus; + $desiredExitStatus = $exitStatus; } else { - $desiredExitStatus = -1; + $desiredExitStatus = -1; } } elsif ( ref $desiredExitStatus eq "HASH" ) { if ( exists( ${$desiredExitStatus}{$exitStatus} ) ) { - if ( defined( ${$desiredExitStatus}{$exitStatus} ) ) - { - $testOutput = ${$desiredExitStatus}{$exitStatus}; - } - $desiredExitStatus = $exitStatus; + if ( defined( ${$desiredExitStatus}{$exitStatus} ) ) + { + $testOutput = ${$desiredExitStatus}{$exitStatus}; + } + $desiredExitStatus = $exitStatus; } else { - $desiredExitStatus = -1; + $desiredExitStatus = -1; } } @@ -332,12 +332,12 @@ sub getTestParameter my( $param, $envvar, $default, $brief, $scoped ); my $new_style; if (scalar @_ <= 3) { - ($param, $brief, $default) = @_; - $envvar = $param; - $new_style = 1; + ($param, $brief, $default) = @_; + $envvar = $param; + $new_style = 1; } else { - ( $param, $envvar, $default, $brief, $scoped ) = @_; - $new_style = 0; + ( $param, $envvar, $default, $brief, $scoped ) = @_; + $new_style = 0; } # Apply default values for optional arguments @@ -394,7 +394,7 @@ sub getTestParameter print STDERR "\n"; if ($userResponse =~ /^(na|none)$/) { - $userResponse = ""; + $userResponse = ""; } # define all user responses at global scope @@ -422,7 +422,7 @@ sub SearchCache { return $CACHE{$param}; } - return undef; # Need this to say "nothing found" + return undef; # Need this to say "nothing found" } sub SetCacheParameter @@ -542,10 +542,10 @@ sub DetermineTestHarnessDirectory push ( @dirs, "./tests"); } - if ( @dirs > 0 ) - { - return @dirs; - } + if ( @dirs > 0 ) + { + return @dirs; + } # To be honest I don't understand which case satisfies the # original code in test.pl : when $tstdir == `pwd` w.r.t. 
@@ -611,73 +611,73 @@ sub TestsFrom # All the new object oriented stuff below -sub new { - my $type = shift; - my $self = {}; - return bless $self, $type; +sub new { + my $type = shift; + my $self = {}; + return bless $self, $type; } # Accessors sub return_code { - my $self = shift; - if (@_) { - return $self->{return_code} = shift; - } else { - return $self->{return_code}; - } + my $self = shift; + if (@_) { + return $self->{return_code} = shift; + } else { + return $self->{return_code}; + } } sub output { - my $self = shift; - if (@_) { - return $self->{output} = shift; - } else { - return $self->{output}; - } + my $self = shift; + if (@_) { + return $self->{output} = shift; + } else { + return $self->{output}; + } } sub perf_output { - my $self = shift; - $_ = $self->{output}; - /\|(.*)$/; - return $1 || ""; + my $self = shift; + $_ = $self->{output}; + /\|(.*)$/; + return $1 || ""; } sub only_output { - my $self = shift; - $_ = $self->{output}; - /(.*?)\|/; - return $1 || ""; + my $self = shift; + $_ = $self->{output}; + /(.*?)\|/; + return $1 || ""; } sub testCmd { - my $class = shift; - my $command = shift or die "No command passed to testCmd"; - my $timeout = shift || 120; - my $object = $class->new; - - local $SIG{'ALRM'} = sub { die("timeout in command: $command"); }; - alarm($timeout); # no test should take longer than 120 seconds - - my $output = `$command`; - $object->return_code($? >> 8); - $_ = $? & 127; - if ($_) { - die "Got signal $_ for command $command"; - } - chomp $output; - $object->output($output); - - alarm(0); - - my ($pkg, $file, $line) = caller(0); - print "Testing: $command", $/; - if ($ENV{'NPTEST_DEBUG'}) { - print "testCmd: Called from line $line in $file", $/; - print "Output: ", $object->output, $/; - print "Return code: ", $object->return_code, $/; - } - - return $object; + my $class = shift; + my $command = shift or die "No command passed to testCmd"; + my $timeout = shift || 120; + my $object = $class->new; + + local $SIG{'ALRM'} = sub { die("timeout in command: $command"); }; + alarm($timeout); # no test should take longer than 120 seconds + + my $output = `$command`; + $object->return_code($? >> 8); + $_ = $? & 127; + if ($_) { + die "Got signal $_ for command $command"; + } + chomp $output; + $object->output($output); + + alarm(0); + + my ($pkg, $file, $line) = caller(0); + print "Testing: $command", $/; + if ($ENV{'NPTEST_DEBUG'}) { + print "testCmd: Called from line $line in $file", $/; + print "Output: ", $object->output, $/; + print "Return code: ", $object->return_code, $/; + } + + return $object; } # do we have ipv6 -- cgit v0.10-9-g596f From d7dcca22ae2f7bdf0ee2282e00c1999de0b58e26 Mon Sep 17 00:00:00 2001 From: Sven Nierlein Date: Tue, 4 Dec 2018 15:20:18 +0100 Subject: tests: rework test parameters there were 2 variants of calling getTestParameter: - parameter, description, default value - parameter, env value, default value, description, scope While scope was never actually used and having 2 names for the same value led to having 2 different entries in the cache file for the same configuration. This commit removes the variants and simplifies tests parameters by only using the first 3 parameter variant. 
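For illustration (this sketch is not part of the patch), here is the same parameter lookup written against the old and the new calling convention; NP_HOST_TCP_HTTP is a parameter the test suite already uses:

    use strict;
    use warnings;
    use NPTest;

    # Removed variant: parameter, environment variable, default value,
    # description (plus an optional "scoped" flag that was never used):
    #
    #   my $host = getTestParameter( "host_tcp_http", "NP_HOST_TCP_HTTP", "localhost",
    #                                "A host providing the HTTP Service (a web server)" );

    # Remaining variant: parameter, description, default value. The name must
    # now match /^NP_[A-Z0-9_]+$/ (getTestParameter dies otherwise), and the
    # value can still be pre-set via the environment or the NPTest cache file.
    my $host = getTestParameter("NP_HOST_TCP_HTTP",
        "A host providing the HTTP Service (a web server)", "localhost");
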
diff --git a/.travis.yml b/.travis.yml index 123e178..712f247 100644 --- a/.travis.yml +++ b/.travis.yml @@ -42,9 +42,6 @@ before_install: - sudo add-apt-repository -y ppa:waja/trusty-backports - sudo apt-get update -qq - sudo apt-get purge -qq gawk - # http://docs.travis-ci.com/user/trusty-ci-environment/ indicates, no MySQL on Trusty (yet) - # # ensure we have a test database in place for tests - # - mysql -e "create database IF NOT EXISTS test;" -uroot install: - sudo apt-get install -qq --no-install-recommends perl autotools-dev libdbi-dev libldap2-dev libpq-dev libmysqlclient-dev libradcli-dev libkrb5-dev libnet-snmp-perl procps @@ -74,6 +71,7 @@ before_script: - tools/setup - ./configure --enable-libtap - make + - export NPTEST_ACCEPTDEFAULT=1 - export NPTEST_CACHE="$(pwd)/plugins/t/NPTest.cache.travis" - ssh-keygen -t dsa -N "" -f ~/.ssh/id_dsa - cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys @@ -82,7 +80,7 @@ before_script: - sudo rm -f /usr/share/mibs/ietf/SNMPv2-PDU /usr/share/mibs/ietf/IPSEC-SPD-MIB /usr/share/mibs/ietf/IPATM-IPMC-MIB /usr/share/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB - sudo mkdir -p /var/lib/snmp/mib_indexes - sudo mkdir /media/ramdisk && sudo chmod 777 /media/ramdisk && sudo mount -t tmpfs -o size=20% none /media/ramdisk - - sed "/host_tls_cert/s/.*/'host_tls_cert' => '$(hostname)',/" -i $NPTEST_CACHE + - sed "/NP_HOST_TLS_CERT/s/.*/'NP_HOST_TLS_CERT' => '$(hostname)',/" -i $NPTEST_CACHE script: - if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then make test; fi diff --git a/NPTest.pm b/NPTest.pm index 2248e8e..4b2de39 100644 --- a/NPTest.pm +++ b/NPTest.pm @@ -62,17 +62,6 @@ runs will use these values. The user is able to change the values by amending the values in the file /var/tmp/NPTest.cache, or by setting the appropriate environment variable before running the test. -The option exists to store parameters in a scoped means, allowing a -test harness to a localise a parameter should the need arise. This -allows a parameter of the same name to exist in a test harness -specific scope, while not affecting the globally scoped parameter. The -scoping identifier is the name of the test harness sans the trailing -".t". All cache searches first look to a scoped parameter before -looking for the parameter at global scope. Thus for a test harness -called "check_disk.t" requesting the parameter "mountpoint_valid", the -cache is first searched for "check_disk"/"mountpoint_valid", if this -fails, then a search is conducted for "mountpoint_valid". - To facilitate quick testing setup, it is possible to accept all the developer provided defaults by setting the environment variable "NPTEST_ACCEPTDEFAULT" to "1" (or any other perl truth value). 
Note @@ -327,78 +316,51 @@ sub skipMsg return $testStatus; } -sub getTestParameter -{ - my( $param, $envvar, $default, $brief, $scoped ); - my $new_style; - if (scalar @_ <= 3) { - ($param, $brief, $default) = @_; - $envvar = $param; - $new_style = 1; - } else { - ( $param, $envvar, $default, $brief, $scoped ) = @_; - $new_style = 0; - } - - # Apply default values for optional arguments - $scoped = ( defined( $scoped ) && $scoped ); +sub getTestParameter { + my($param, $description, $default) = @_; - my $testharness = basename( (caller(0))[1], ".t" ); # used for scoping - - if ( defined( $envvar ) && exists( $ENV{$envvar} ) && $ENV{$envvar} ) - { - return $ENV{$envvar}; + if($param !~ m/^NP_[A-Z0-9_]+$/mx) { + die("parameter should be all uppercase and start with NP_ (requested from ".(caller(0))[1].")"); } - my $cachedValue = SearchCache( $param, $testharness ); - if ( defined( $cachedValue ) ) - { - # This save required to convert to new style because the key required is - # changing to the environment variable - if ($new_style == 0) { - SetCacheParameter( $envvar, undef, $cachedValue ); - } + return $ENV{$param} if $ENV{$param}; + + my $cachedValue = SearchCache($param); + if(defined $cachedValue) { return $cachedValue; } - my $defaultValid = ( defined( $default ) && $default ); - my $autoAcceptDefault = ( exists( $ENV{'NPTEST_ACCEPTDEFAULT'} ) && $ENV{'NPTEST_ACCEPTDEFAULT'} ); - - if ( $autoAcceptDefault && $defaultValid ) - { - return $default; + if($ENV{'NPTEST_ACCEPTDEFAULT'}) { + return $default if $default; + return ""; } # Set "none" if no terminal attached (eg, tinderbox build servers when new variables set) return "" unless (-t STDIN); my $userResponse = ""; - - while ( $userResponse eq "" ) - { + while($userResponse eq "") { print STDERR "\n"; - print STDERR "Test Harness : $testharness\n"; - print STDERR "Test Parameter : $param\n"; - print STDERR "Environment Variable : $envvar\n" if ($param ne $envvar); - print STDERR "Brief Description : $brief\n"; - print STDERR "Enter value (or 'none') ", ($defaultValid ? "[${default}]" : "[]"), " => "; + print STDERR "Test File : ".(caller(0))[1]."\n"; + print STDERR "Test Parameter : $param\n"; + print STDERR "Description : $description\n"; + print STDERR "Enter value (or 'none') ", ($default ? "[${default}]" : "[]"), " => "; $userResponse = ; $userResponse = "" if ! defined( $userResponse ); # Handle EOF - chomp( $userResponse ); - if ( $defaultValid && $userResponse eq "" ) - { + chomp($userResponse); + if($default && $userResponse eq "") { $userResponse = $default; } } print STDERR "\n"; - if ($userResponse =~ /^(na|none)$/) { + if($userResponse =~ /^(na|none)$/) { $userResponse = ""; } - # define all user responses at global scope - SetCacheParameter( $param, ( $scoped ? 
$testharness : undef ), $userResponse ); + # store user responses + SetCacheParameter($param, $userResponse); return $userResponse; } @@ -407,37 +369,20 @@ sub getTestParameter # Internal Cache Management Functions # -sub SearchCache -{ - my( $param, $scope ) = @_; +sub SearchCache { + my($param) = @_; LoadCache(); - if ( exists( $CACHE{$scope} ) && exists( $CACHE{$scope}{$param} ) ) - { - return $CACHE{$scope}{$param}; - } - - if ( exists( $CACHE{$param} ) ) - { + if(exists $CACHE{$param}) { return $CACHE{$param}; } return undef; # Need this to say "nothing found" } -sub SetCacheParameter -{ - my( $param, $scope, $value ) = @_; - - if ( defined( $scope ) ) - { - $CACHE{$scope}{$param} = $value; - } - else - { - $CACHE{$param} = $value; - } - +sub SetCacheParameter { + my($param, $value) = @_; + $CACHE{$param} = $value; SaveCache(); } @@ -475,6 +420,11 @@ sub SaveCache delete $CACHE{'_cache_loaded_'}; my $oldFileContents = delete $CACHE{'_original_cache'}; + # clean up old style params + for my $key (keys %CACHE) { + delete $CACHE{$key} if $key !~ m/^NP_[A-Z0-9_]+$/mx; + } + my($dataDumper) = new Data::Dumper([\%CACHE]); $dataDumper->Terse(1); $dataDumper->Sortkeys(1); @@ -486,7 +436,7 @@ sub SaveCache if($oldFileContents ne $data) { my($fileHandle) = new IO::File; if (!$fileHandle->open( "> ${CACHEFILENAME}")) { - print STDERR "NPTest::LoadCache() : Problem saving ${CACHEFILENAME} : $!\n"; + print STDERR "NPTest::SaveCache() : Problem saving ${CACHEFILENAME} : $!\n"; return; } print $fileHandle $data; diff --git a/plugins/t/NPTest.cache.travis b/plugins/t/NPTest.cache.travis index 28437a0..6ee4505 100644 --- a/plugins/t/NPTest.cache.travis +++ b/plugins/t/NPTest.cache.travis @@ -1,64 +1,54 @@ { - 'MYSQL_LOGIN_DETAILS' => '-u root -d test', 'NP_ALLOW_SUDO' => 'yes', 'NP_DNS_SERVER' => '8.8.8.8', 'NP_GOOD_NTP_SERVICE' => '', + 'NP_HOST_DHCP_RESPONSIVE' => '', + 'NP_HOST_HPJD_PORT_INVALID' => '161', + 'NP_HOST_HPJD_PORT_VALID' => '', + 'NP_HOSTNAME_INVALID_CIDR' => '130.133.8.39/30', 'NP_HOSTNAME_INVALID' => 'nosuchhost', - 'NP_HOSTNAME_VALID' => 'monitoring-plugins.org', - 'NP_HOSTNAME_VALID_IP' => '130.133.8.40', 'NP_HOSTNAME_VALID_CIDR' => '130.133.8.41/30', - 'NP_HOSTNAME_INVALID_CIDR' => '130.133.8.39/30', + 'NP_HOSTNAME_VALID_IP' => '130.133.8.40', + 'NP_HOSTNAME_VALID' => 'monitoring-plugins.org', 'NP_HOSTNAME_VALID_REVERSE' => 'orwell.monitoring-plugins.org.', - 'NP_HOST_DHCP_RESPONSIVE' => '', 'NP_HOST_NONRESPONSIVE' => '10.0.0.1', 'NP_HOST_RESPONSIVE' => 'localhost', 'NP_HOST_SMB' => '', - 'NP_HOST_SNMP' => 'localhost', + 'NP_HOST_SNMP' => '', 'NP_HOST_TCP_FTP' => '', 'NP_HOST_TCP_HPJD' => '', - 'NP_HOST_HPJD_PORT_INVALID' => '161', - 'NP_HOST_HPJD_PORT_VALID' => '', - 'NP_HOST_TCP_HTTP' => 'localhost', 'NP_HOST_TCP_HTTP2' => 'test.monitoring-plugins.org', + 'NP_HOST_TCP_HTTP' => 'localhost', 'NP_HOST_TCP_IMAP' => 'imap.web.de', + 'NP_HOST_TCP_JABBER' => 'jabber.org', 'NP_HOST_TCP_LDAP' => 'localhost', 'NP_HOST_TCP_POP' => 'pop.web.de', + 'NP_HOST_TCP_PROXY' => 'localhost', 'NP_HOST_TCP_SMTP' => 'localhost', 'NP_HOST_TCP_SMTP_NOTLS' => '', 'NP_HOST_TCP_SMTP_TLS' => '', + 'NP_HOST_TLS_CERT' => 'localhost, + 'NP_HOST_TLS_HTTP' => 'localhost', + 'NP_HOST_UDP_TIME' => 'none', 'NP_INTERNET_ACCESS' => 'yes', 'NP_LDAP_BASE_DN' => 'cn=admin,dc=nodomain', 'NP_MOUNTPOINT2_VALID' => '/media/ramdisk', 'NP_MOUNTPOINT_VALID' => '/', + 'NP_MYSQL_LOGIN_DETAILS' => '-u root -d test', 'NP_MYSQL_SERVER' => 'localhost', - 'NP_HOST_UDP_TIME' => 'localhost', 'NP_MYSQL_SOCKET' => 
'/var/run/mysqld/mysqld.sock', 'NP_MYSQL_WITH_SLAVE' => '', 'NP_MYSQL_WITH_SLAVE_LOGIN' => '', 'NP_NO_NTP_SERVICE' => 'localhost', + 'NP_PORT_TCP_PROXY' => '3128', 'NP_SMB_SHARE' => '', 'NP_SMB_SHARE_DENY' => '', 'NP_SMB_SHARE_SPC' => '', 'NP_SMB_VALID_USER' => '', 'NP_SMB_VALID_USER_PASS' => '', - 'NP_SNMP_COMMUNITY' => 'public', + 'NP_SNMP_COMMUNITY' => '', + 'NP_SNMP_USER' => '', 'NP_SSH_CONFIGFILE' => '~/.ssh/config', 'NP_SSH_HOST' => 'localhost', - 'NP_SSH_IDENTITY' => '~/.ssh/id_dsa', - 'NP_HOST_TCP_JABBER' => 'jabber.org', - 'host_nonresponsive' => '10.0.0.1', - 'host_responsive' => 'localhost', - 'host_snmp' => '', - 'host_tcp_ftp' => '', - 'host_tcp_http' => 'localhost', - 'host_tcp_imap' => 'imap.nierlein.de', - 'host_tcp_smtp' => 'localhost', - 'hostname_invalid' => 'nosuchhost', - 'snmp_community' => '', - 'user_snmp' => '', - 'host_udp_time' => 'none', - 'host_tls_http' => 'localhost', - 'host_tls_cert' => 'localhost', - 'NP_HOST_TCP_PROXY' => 'localhost', - 'NP_PORT_TCP_PROXY' => '3128', + 'NP_SSH_IDENTITY' => '~/.ssh/id_dsa' } diff --git a/plugins/t/check_by_ssh.t b/plugins/t/check_by_ssh.t index 4797390..1d2939e 100644 --- a/plugins/t/check_by_ssh.t +++ b/plugins/t/check_by_ssh.t @@ -9,17 +9,9 @@ use Test::More; use NPTest; # Required parameters -my $ssh_service = getTestParameter( "NP_SSH_HOST", - "A host providing SSH service", - "localhost"); - -my $ssh_key = getTestParameter( "NP_SSH_IDENTITY", - "A key allowing access to NP_SSH_HOST", - "~/.ssh/id_dsa"); - -my $ssh_conf = getTestParameter( "NP_SSH_CONFIGFILE", - "A config file with ssh settings", - "~/.ssh/config"); +my $ssh_service = getTestParameter("NP_SSH_HOST", "A host providing SSH service", "localhost"); +my $ssh_key = getTestParameter("NP_SSH_IDENTITY", "A key allowing access to NP_SSH_HOST", "~/.ssh/id_dsa"); +my $ssh_conf = getTestParameter( "NP_SSH_CONFIGFILE", "A config file with ssh settings", "~/.ssh/config"); plan skip_all => "SSH_HOST and SSH_IDENTITY must be defined" unless ($ssh_service && $ssh_key); diff --git a/plugins/t/check_fping.t b/plugins/t/check_fping.t index 08692e4..342b0a7 100644 --- a/plugins/t/check_fping.t +++ b/plugins/t/check_fping.t @@ -15,15 +15,9 @@ BEGIN {$tests = 4; plan tests => $tests} my $successOutput = '/^FPING OK - /'; my $failureOutput = '/^FPING CRITICAL - /'; -my $host_responsive = getTestParameter( "host_responsive", "NP_HOST_RESPONSIVE", "localhost", - "The hostname of system responsive to network requests" ); - -my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1", - "The hostname of system not responsive to network requests" ); - -my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost", - "An invalid (not known to DNS) hostname" ); - +my $host_responsive = getTestParameter("NP_HOST_RESPONSIVE", "The hostname of system responsive to network requests", "localhost"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); my $t; diff --git a/plugins/t/check_ftp.t b/plugins/t/check_ftp.t index de6831b..93a7d7c 100644 --- a/plugins/t/check_ftp.t +++ b/plugins/t/check_ftp.t @@ -11,14 +11,9 @@ use NPTest; use vars qw($tests); BEGIN {$tests = 4; plan tests => $tests} -my $host_tcp_ftp = getTestParameter( "host_tcp_ftp", "NP_HOST_TCP_FTP", "localhost", - "A host providing the FTP 
Service (an FTP server)"); - -my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1", - "The hostname of system not responsive to network requests" ); - -my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost", - "An invalid (not known to DNS) hostname" ); +my $host_tcp_ftp = getTestParameter("NP_HOST_TCP_FTP", "A host providing the FTP Service (an FTP server)", "localhost"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); my $successOutput = '/FTP OK -\s+[0-9]?\.?[0-9]+ second response time/'; diff --git a/plugins/t/check_http.t b/plugins/t/check_http.t index 416fbbc..b3760eb 100644 --- a/plugins/t/check_http.t +++ b/plugins/t/check_http.t @@ -9,46 +9,21 @@ use Test::More; use POSIX qw/mktime strftime/; use NPTest; -plan tests => 55; +plan tests => 50; my $successOutput = '/OK.*HTTP.*second/'; my $res; -my $host_tcp_http = getTestParameter( "NP_HOST_TCP_HTTP", - "A host providing the HTTP Service (a web server)", - "localhost" ); - -my $host_tls_http = getTestParameter( "host_tls_http", "NP_HOST_TLS_HTTP", "localhost", - "A host providing the HTTPS Service (a tls web server)" ); - -my $host_tls_cert = getTestParameter( "host_tls_cert", "NP_HOST_TLS_CERT", "localhost", - "the common name of the certificate." ); - - -my $host_nonresponsive = getTestParameter( "NP_HOST_NONRESPONSIVE", - "The hostname of system not responsive to network requests", - "10.0.0.1" ); - -my $hostname_invalid = getTestParameter( "NP_HOSTNAME_INVALID", - "An invalid (not known to DNS) hostname", - "nosuchhost"); - -my $internet_access = getTestParameter( "NP_INTERNET_ACCESS", - "Is this system directly connected to the internet?", - "yes"); - -my $host_tcp_http2 = getTestParameter( "NP_HOST_TCP_HTTP2", - "A host providing an index page containing the string 'monitoring'", - "test.monitoring-plugins.org" ); - -my $host_tcp_proxy = getTestParameter( "NP_HOST_TCP_PROXY", - "A host providing a HTTP proxy with CONNECT support", - "localhost"); - -my $port_tcp_proxy = getTestParameter( "NP_PORT_TCP_PROXY", - "Port of the proxy with HTTP and CONNECT support", - "3128"); +my $host_tcp_http = getTestParameter("NP_HOST_TCP_HTTP", "A host providing the HTTP Service (a web server)", "localhost"); +my $host_tls_http = getTestParameter("NP_HOST_TLS_HTTP", "A host providing the HTTPS Service (a tls web server)", "localhost"); +my $host_tls_cert = getTestParameter("NP_HOST_TLS_CERT", "the common name of the certificate.", "localhost"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); +my $internet_access = getTestParameter("NP_INTERNET_ACCESS", "Is this system directly connected to the internet?", "yes"); +my $host_tcp_http2 = getTestParameter("NP_HOST_TCP_HTTP2", "A host providing an index page containing the string 'monitoring'", "test.monitoring-plugins.org"); +my $host_tcp_proxy = getTestParameter("NP_HOST_TCP_PROXY", "A host providing a HTTP proxy with CONNECT support", "localhost"); +my $port_tcp_proxy = getTestParameter("NP_PORT_TCP_PROXY", "Port of the proxy with HTTP and CONNECT support", "3128"); my $faketime = -x 
'/usr/bin/faketime' ? 1 : 0; @@ -158,7 +133,7 @@ SKIP: { # run some certificate checks with faketime SKIP: { - skip "No faketime binary found", 12 if !$faketime; + skip "No faketime binary found", 7 if !$faketime; $res = NPTest->testCmd("LC_TIME=C TZ=UTC ./check_http -C 1 $host_tls_http"); like($res->output, qr/OK - Certificate '$host_tls_cert' will expire on/, "Catch cert output"); is( $res->return_code, 0, "Catch cert output exit code" ); @@ -171,23 +146,18 @@ SKIP: { my $time = strftime("%Y-%m-%d %H:%M:%S", localtime($ts)); $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./check_http -C 1 $host_tls_http"); like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' just expired/, "Output on expire date"); - is( $res->return_code, 2, "Output on expire date" ); $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-1))."' ./check_http -C 1 $host_tls_http"); like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 0 minutes/, "cert expires in 1 second output"); - is( $res->return_code, 2, "cert expires in 1 second exit code" ); $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-120))."' ./check_http -C 1 $host_tls_http"); like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 2 minutes/, "cert expires in 2 minutes output"); - is( $res->return_code, 2, "cert expires in 2 minutes exit code" ); $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-7200))."' ./check_http -C 1 $host_tls_http"); like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 2 hours/, "cert expires in 2 hours output"); - is( $res->return_code, 2, "cert expires in 2 hours exit code" ); $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_http -C 1 $host_tls_http"); like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expired on/, "Certificate expired output"); - is( $res->return_code, 2, "Certificate expired exit code" ); }; $res = NPTest->testCmd( "./check_http --ssl $host_tls_http -E" ); diff --git a/plugins/t/check_imap.t b/plugins/t/check_imap.t index 9c6eae1..7c74e56 100644 --- a/plugins/t/check_imap.t +++ b/plugins/t/check_imap.t @@ -8,17 +8,10 @@ use strict; use Test::More tests => 7; use NPTest; -my $host_tcp_smtp = getTestParameter( "host_tcp_smtp", "NP_HOST_TCP_SMTP", "mailhost", - "A host providing an STMP Service (a mail server)"); - -my $host_tcp_imap = getTestParameter( "host_tcp_imap", "NP_HOST_TCP_IMAP", $host_tcp_smtp, - "A host providing an IMAP Service (a mail server)"); - -my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1", - "The hostname of system not responsive to network requests" ); - -my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost", - "An invalid (not known to DNS) hostname" ); +my $host_tcp_smtp = getTestParameter("NP_HOST_TCP_SMTP", "A host providing an STMP Service (a mail server)", "mailhost"); +my $host_tcp_imap = getTestParameter("NP_HOST_TCP_IMAP", "A host providing an IMAP Service (a mail server)", $host_tcp_smtp); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); my 
$t; diff --git a/plugins/t/check_jabber.t b/plugins/t/check_jabber.t index 7a708d5..fcdae17 100644 --- a/plugins/t/check_jabber.t +++ b/plugins/t/check_jabber.t @@ -10,23 +10,9 @@ use NPTest; plan tests => 10; -my $host_tcp_jabber = getTestParameter( - "NP_HOST_TCP_JABBER", - "A host providing the Jabber Service", - "jabber.org" - ); - -my $host_nonresponsive = getTestParameter( - "NP_HOST_NONRESPONSIVE", - "The hostname of system not responsive to network requests", - "10.0.0.1", - ); - -my $hostname_invalid = getTestParameter( - "NP_HOSTNAME_INVALID", - "An invalid (not known to DNS) hostname", - "nosuchhost", - ); +my $host_tcp_jabber = getTestParameter("NP_HOST_TCP_JABBER", "A host providing the Jabber Service", "jabber.de"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); my $jabberOK = '/JABBER OK\s-\s\d+\.\d+\ssecond response time on '.$host_tcp_jabber.' port 5222/'; diff --git a/plugins/t/check_ldap.t b/plugins/t/check_ldap.t index b8944d4..b8a4a76 100644 --- a/plugins/t/check_ldap.t +++ b/plugins/t/check_ldap.t @@ -9,19 +9,10 @@ use warnings; use Test::More; use NPTest; -my $host_tcp_ldap = getTestParameter("NP_HOST_TCP_LDAP", - "A host providing the LDAP Service", - "localhost" ); - -my $ldap_base_dn = getTestParameter("NP_LDAP_BASE_DN", - "A base dn for the LDAP Service", - "cn=admin" ); - -my $host_nonresponsive = getTestParameter("host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1", - "The hostname of system not responsive to network requests" ); - -my $hostname_invalid = getTestParameter("hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost", - "An invalid (not known to DNS) hostname" ); +my $host_tcp_ldap = getTestParameter("NP_HOST_TCP_LDAP", "A host providing the LDAP Service", "localhost"); +my $ldap_base_dn = getTestParameter("NP_LDAP_BASE_DN", "A base dn for the LDAP Service", "cn=admin"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); my($result, $cmd); my $command = './check_ldap'; diff --git a/plugins/t/check_mysql.t b/plugins/t/check_mysql.t index 28cd4cd..e426bf5 100644 --- a/plugins/t/check_mysql.t +++ b/plugins/t/check_mysql.t @@ -21,30 +21,11 @@ plan skip_all => "check_mysql not compiled" unless (-x "check_mysql"); plan tests => 15; my $bad_login_output = '/Access denied for user /'; -my $mysqlserver = getTestParameter( - "NP_MYSQL_SERVER", - "A MySQL Server hostname or IP with no slaves setup" - ); -my $mysqlsocket = getTestParameter( - "NP_MYSQL_SOCKET", - "Full path to a MySQL Server socket with no slaves setup" - ); -my $mysql_login_details = getTestParameter( - "MYSQL_LOGIN_DETAILS", - "Command line parameters to specify login access (requires " . - "REPLICATION CLIENT privleges)", - "-u test -ptest", - ); -my $with_slave = getTestParameter( - "NP_MYSQL_WITH_SLAVE", - "MySQL server with slaves setup" - ); -my $with_slave_login = getTestParameter( - "NP_MYSQL_WITH_SLAVE_LOGIN", - "Login details for server with slave (requires REPLICATION CLIENT " . 
- "privleges)", - $mysql_login_details || "-u test -ptest" - ); +my $mysqlserver = getTestParameter("NP_MYSQL_SERVER", "A MySQL Server hostname or IP with no slaves setup"); +my $mysqlsocket = getTestParameter("NP_MYSQL_SOCKET", "Full path to a MySQL Server socket with no slaves setup"); +my $mysql_login_details = getTestParameter("NP_MYSQL_LOGIN_DETAILS", "Command line parameters to specify login access (requires REPLICATION CLIENT privleges)", "-u test -ptest"); +my $with_slave = getTestParameter("NP_MYSQL_WITH_SLAVE", "MySQL server with slaves setup"); +my $with_slave_login = getTestParameter("NP_MYSQL_WITH_SLAVE_LOGIN", "Login details for server with slave (requires REPLICATION CLIENT privleges)", $mysql_login_details || "-u test -ptest"); my $result; diff --git a/plugins/t/check_mysql_query.t b/plugins/t/check_mysql_query.t index 407af88..96899ac 100644 --- a/plugins/t/check_mysql_query.t +++ b/plugins/t/check_mysql_query.t @@ -17,15 +17,8 @@ use vars qw($tests); plan skip_all => "check_mysql_query not compiled" unless (-x "check_mysql_query"); -my $mysqlserver = getTestParameter( - "NP_MYSQL_SERVER", - "A MySQL Server with no slaves setup" - ); -my $mysql_login_details = getTestParameter( - "MYSQL_LOGIN_DETAILS", - "Command line parameters to specify login access", - "-u user -ppw -d db", - ); +my $mysqlserver = getTestParameter("NP_MYSQL_SERVER", "A MySQL Server with no slaves setup"); +my $mysql_login_details = getTestParameter("NP_MYSQL_LOGIN_DETAILS", "Command line parameters to specify login access", "-u user -ppw -d db"); my $result; if (! $mysqlserver) { diff --git a/plugins/t/check_snmp.t b/plugins/t/check_snmp.t index 9a6cd2b..f2f218f 100644 --- a/plugins/t/check_snmp.t +++ b/plugins/t/check_snmp.t @@ -15,18 +15,12 @@ BEGIN { my $res; -my $host_snmp = getTestParameter( "host_snmp", "NP_HOST_SNMP", "localhost", - "A host providing an SNMP Service"); +my $host_snmp = getTestParameter("NP_HOST_SNMP", "A host providing an SNMP Service", "localhost"); +my $snmp_community = getTestParameter("NP_SNMP_COMMUNITY", "The SNMP Community string for SNMP Testing (assumes snmp v1)", "public"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); +my $user_snmp = getTestParameter("NP_SNMP_USER", "An SNMP user", "auth_md5"); -my $snmp_community = getTestParameter( "snmp_community", "NP_SNMP_COMMUNITY", "public", - "The SNMP Community string for SNMP Testing (assumes snmp v1)" ); - -my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1", - "The hostname of system not responsive to network requests" ); - -my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost", - "An invalid (not known to DNS) hostname" ); -my $user_snmp = getTestParameter( "user_snmp", "NP_SNMP_USER", "auth_md5", "An SNMP user"); $res = NPTest->testCmd( "./check_snmp -t 1" ); is( $res->return_code, 3, "No host name" ); diff --git a/plugins/t/check_ssh.t b/plugins/t/check_ssh.t index 8008349..a5cd23c 100644 --- a/plugins/t/check_ssh.t +++ b/plugins/t/check_ssh.t @@ -9,17 +9,9 @@ use Test::More; use NPTest; # Required parameters -my $ssh_host = getTestParameter("NP_SSH_HOST", - "A host providing SSH service", - "localhost"); - -my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", - "The hostname of system not responsive to 
network requests", - "10.0.0.1" ); - -my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", - "An invalid (not known to DNS) hostname", - "nosuchhost" ); +my $ssh_host = getTestParameter("NP_SSH_HOST", "A host providing SSH service", "localhost"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1" ); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost" ); plan skip_all => "SSH_HOST must be defined" unless $ssh_host; diff --git a/plugins/t/check_tcp.t b/plugins/t/check_tcp.t index 121b0cb..cb4de53 100644 --- a/plugins/t/check_tcp.t +++ b/plugins/t/check_tcp.t @@ -15,21 +15,11 @@ BEGIN { } -my $host_tcp_http = getTestParameter( "host_tcp_http", "NP_HOST_TCP_HTTP", "localhost", - "A host providing the HTTP Service (a web server)" ); - -my $host_tls_http = getTestParameter( "host_tls_http", "NP_HOST_TLS_HTTP", "localhost", - "A host providing the HTTPS Service (a tls web server)" ); - -my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1", - "The hostname of system not responsive to network requests" ); - -my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost", - "An invalid (not known to DNS) hostname" ); - -my $internet_access = getTestParameter( "NP_INTERNET_ACCESS", - "Is this system directly connected to the internet?", - "yes"); +my $host_tcp_http = getTestParameter("NP_HOST_TCP_HTTP", "A host providing the HTTP Service (a web server)", "localhost"); +my $host_tls_http = getTestParameter("NP_HOST_TLS_HTTP", "A host providing the HTTPS Service (a tls web server)", "localhost"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); +my $internet_access = getTestParameter("NP_INTERNET_ACCESS", "Is this system directly connected to the internet?", "yes"); my $successOutput = '/^TCP OK\s-\s+[0-9]?\.?[0-9]+ second response time on port [0-9]+/'; diff --git a/plugins/t/check_time.t b/plugins/t/check_time.t index 961f56e..92c2f89 100644 --- a/plugins/t/check_time.t +++ b/plugins/t/check_time.t @@ -11,14 +11,9 @@ use NPTest; use vars qw($tests); BEGIN {$tests = 8; plan tests => $tests} -my $host_udp_time = getTestParameter( "host_udp_time", "NP_HOST_UDP_TIME", "localhost", - "A host providing the UDP Time Service" ); - -my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1", - "The hostname of system not responsive to network requests" ); - -my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost", - "An invalid (not known to DNS) hostname" ); +my $host_udp_time = getTestParameter("NP_HOST_UDP_TIME", "A host providing the UDP Time Service", "localhost"); +my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1"); +my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost"); my $successOutput = '/^TIME OK - [0-9]+ second time difference/'; -- cgit v0.10-9-g596f From fd6dc666538dd95f11811817ceb21604676e142c Mon Sep 17 00:00:00 2001 From: Lars Michelsen Date: Tue, 13 Oct 2015 10:26:01 +0200 Subject: check_icmp: Add IPv6 
support This commit adds IPv6 capabilities to check_icmp. It is now possible to specify the address family using the arguments -4 (default) or -6. To make the change possible we had to move the argument parsing previous to creating the socket to be able to create it with the correct address family. This commit also fixes some gcc 4.9.2 compiler warnings. It has been tested with several current linux distributions (debian, ubuntu, rh, sles). This commit fixes monitoring-plugins/monitoring-plugins#1291 diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index 4098874..6aeedf4 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -67,7 +67,9 @@ const char *email = "devel@monitoring-plugins.org"; #include #include #include +#include #include +#include #include #include #include @@ -113,8 +115,8 @@ typedef struct rta_host { unsigned short id; /* id in **table, and icmp pkts */ char *name; /* arg used for adding this host */ char *msg; /* icmp error message, if any */ - struct sockaddr_in saddr_in; /* the address of this host */ - struct in_addr error_addr; /* stores address of error replies */ + struct sockaddr_storage saddr_in; /* the address of this host */ + struct sockaddr_storage error_addr; /* stores address of error replies */ unsigned long long time_waited; /* total time waited, in usecs */ unsigned int icmp_sent, icmp_recv, icmp_lost; /* counters */ unsigned char icmp_type, icmp_code; /* type and code from errors */ @@ -140,6 +142,18 @@ typedef struct icmp_ping_data { unsigned short ping_id; } icmp_ping_data; +typedef union ip_hdr { + struct ip ip; + struct ip6_hdr ip6; +} ip_hdr; + +typedef union icmp_packet { + void *buf; + struct icmp *icp; + struct icmp6_hdr *icp6; + u_short *cksum_in; +} icmp_packet; + /* the different modes of this program are as follows: * MODE_RTA: send all packets no matter what (mimic check_icmp and check_ping) * MODE_HOSTCHECK: Return immediately upon any sign of life @@ -190,8 +204,9 @@ static int get_threshold(char *str, threshold *th); static void run_checks(void); static void set_source_ip(char *); static int add_target(char *); -static int add_target_ip(char *, struct in_addr *); -static int handle_random_icmp(unsigned char *, struct sockaddr_in *); +static int add_target_ip(char *, struct sockaddr_storage *); +static int handle_random_icmp(unsigned char *, struct sockaddr_storage *); +static void parse_address(struct sockaddr_storage *, char *, int); static unsigned short icmp_checksum(unsigned short *, int); static void finish(int); static void crash(const char *, ...); @@ -300,7 +315,7 @@ get_icmp_error_msg(unsigned char icmp_type, unsigned char icmp_code) } static int -handle_random_icmp(unsigned char *packet, struct sockaddr_in *addr) +handle_random_icmp(unsigned char *packet, struct sockaddr_storage *addr) { struct icmp p, sent_icmp; struct rta_host *host = NULL; @@ -342,9 +357,11 @@ handle_random_icmp(unsigned char *packet, struct sockaddr_in *addr) /* it is indeed a response for us */ host = table[ntohs(sent_icmp.icmp_seq)/packets]; if(debug) { + char address[INET6_ADDRSTRLEN]; + parse_address(addr, address, sizeof(address)); printf("Received \"%s\" from %s for ICMP ECHO sent to %s.\n", - get_icmp_error_msg(p.icmp_type, p.icmp_code), - inet_ntoa(addr->sin_addr), host->name); + get_icmp_error_msg(p.icmp_type, p.icmp_code), + address, host->name); } icmp_lost++; @@ -364,11 +381,23 @@ handle_random_icmp(unsigned char *packet, struct sockaddr_in *addr) } host->icmp_type = p.icmp_type; host->icmp_code = p.icmp_code; 
- host->error_addr.s_addr = addr->sin_addr.s_addr; + host->error_addr = *addr; return 0; } +void parse_address(struct sockaddr_storage *addr, char *address, int size) +{ + switch (address_family) { + case AF_INET: + inet_ntop(address_family, &((struct sockaddr_in *)addr)->sin_addr, address, size); + break; + case AF_INET6: + inet_ntop(address_family, &((struct sockaddr_in6 *)addr)->sin6_addr, address, size); + break; + } +} + int main(int argc, char **argv) { @@ -390,7 +419,90 @@ main(int argc, char **argv) * that before pointer magic (esp. on network data) */ icmp_sockerrno = udp_sockerrno = tcp_sockerrno = sockets = 0; - if((icmp_sock = socket(PF_INET, SOCK_RAW, IPPROTO_ICMP)) != -1) + address_family = AF_INET; + int icmp_proto = IPPROTO_ICMP; + + /* parse the arguments */ + for(i = 1; i < argc; i++) { + while((arg = getopt(argc, argv, "vhVw:c:n:p:t:H:s:i:b:I:l:m:64")) != EOF) { + unsigned short size; + switch(arg) { + case 'v': + debug++; + break; + case 'b': + size = (unsigned short)strtol(optarg,NULL,0); + if (size >= (sizeof(struct icmp) + sizeof(struct icmp_ping_data)) && + size < MAX_PING_DATA) { + icmp_data_size = size; + icmp_pkt_size = size + ICMP_MINLEN; + } else + usage_va("ICMP data length must be between: %d and %d", + sizeof(struct icmp) + sizeof(struct icmp_ping_data), + MAX_PING_DATA - 1); + break; + case 'i': + pkt_interval = get_timevar(optarg); + break; + case 'I': + target_interval = get_timevar(optarg); + break; + case 'w': + get_threshold(optarg, &warn); + break; + case 'c': + get_threshold(optarg, &crit); + break; + case 'n': + case 'p': + packets = strtoul(optarg, NULL, 0); + break; + case 't': + timeout = strtoul(optarg, NULL, 0); + if(!timeout) timeout = 10; + break; + case 'H': + add_target(optarg); + break; + case 'l': + ttl = (unsigned char)strtoul(optarg, NULL, 0); + break; + case 'm': + min_hosts_alive = (int)strtoul(optarg, NULL, 0); + break; + case 'd': /* implement later, for cluster checks */ + warn_down = (unsigned char)strtoul(optarg, &ptr, 0); + if(ptr) { + crit_down = (unsigned char)strtoul(ptr + 1, NULL, 0); + } + break; + case 's': /* specify source IP address */ + set_source_ip(optarg); + break; + case 'V': /* version */ + print_revision (progname, NP_VERSION); + exit (STATE_UNKNOWN); + case 'h': /* help */ + print_help (); + exit (STATE_UNKNOWN); + break; + case '4': + address_family = AF_INET; + icmp_proto = IPPROTO_ICMP; + break; + case '6': +#ifdef USE_IPV6 + address_family = AF_INET6; + icmp_proto = IPPROTO_ICMPV6; +#else + usage (_("IPv6 support not available\n")); +#endif + break; + } + } + } + + if((icmp_sock = socket(address_family, SOCK_RAW, icmp_proto)) != -1) sockets |= HAVE_ICMP; else icmp_sockerrno = errno; @@ -403,7 +515,10 @@ main(int argc, char **argv) /* else tcp_sockerrno = errno; */ /* now drop privileges (no effect if not setsuid or geteuid() == 0) */ - setuid(getuid()); + if (setuid(getuid()) == -1) { + printf("ERROR: Failed to drop privileges\n"); + return 1; + } #ifdef SO_TIMESTAMP if(setsockopt(icmp_sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on))) @@ -467,73 +582,6 @@ main(int argc, char **argv) strcpy(argv[1], "-V"); } - /* parse the arguments */ - for(i = 1; i < argc; i++) { - while((arg = getopt(argc, argv, "vhVw:c:n:p:t:H:s:i:b:I:l:m:")) != EOF) { - unsigned short size; - switch(arg) { - case 'v': - debug++; - break; - case 'b': - size = (unsigned short)strtol(optarg,NULL,0); - if (size >= (sizeof(struct icmp) + sizeof(struct icmp_ping_data)) && - size < MAX_PING_DATA) { - icmp_data_size = size; - icmp_pkt_size = 
size + ICMP_MINLEN; - } else - usage_va("ICMP data length must be between: %d and %d", - sizeof(struct icmp) + sizeof(struct icmp_ping_data), - MAX_PING_DATA - 1); - break; - case 'i': - pkt_interval = get_timevar(optarg); - break; - case 'I': - target_interval = get_timevar(optarg); - break; - case 'w': - get_threshold(optarg, &warn); - break; - case 'c': - get_threshold(optarg, &crit); - break; - case 'n': - case 'p': - packets = strtoul(optarg, NULL, 0); - break; - case 't': - timeout = strtoul(optarg, NULL, 0); - if(!timeout) timeout = 10; - break; - case 'H': - add_target(optarg); - break; - case 'l': - ttl = (unsigned char)strtoul(optarg, NULL, 0); - break; - case 'm': - min_hosts_alive = (int)strtoul(optarg, NULL, 0); - break; - case 'd': /* implement later, for cluster checks */ - warn_down = (unsigned char)strtoul(optarg, &ptr, 0); - if(ptr) { - crit_down = (unsigned char)strtoul(ptr + 1, NULL, 0); - } - break; - case 's': /* specify source IP address */ - set_source_ip(optarg); - break; - case 'V': /* version */ - print_revision (progname, NP_VERSION); - exit (STATE_UNKNOWN); - case 'h': /* help */ - print_help (); - exit (STATE_UNKNOWN); - } - } - } - argv = &argv[optind]; while(*argv) { add_target(*argv); @@ -633,7 +681,7 @@ main(int argc, char **argv) } host = list; - table = malloc(sizeof(struct rta_host **) * targets); + table = (struct rta_host**)malloc(sizeof(struct rta_host **) * targets); i = 0; while(host) { host->id = i*packets; @@ -697,9 +745,15 @@ run_checks() } } + /* response structure: + * IPv4: * ip header : 20 bytes * icmp header : 28 bytes + * IPv6: + * ip header : 40 bytes + * icmp header : 28 bytes + * both: * icmp echo reply : the rest */ static int @@ -707,16 +761,27 @@ wait_for_reply(int sock, u_int t) { int n, hlen; static unsigned char buf[4096]; - struct sockaddr_in resp_addr; - struct ip *ip; - struct icmp icp; + struct sockaddr_storage resp_addr; + union ip_hdr *ip; + union icmp_packet packet; struct rta_host *host; struct icmp_ping_data data; struct timeval wait_start, now; u_int tdiff, i, per_pkt_wait; + if (!(packet.buf = malloc(icmp_pkt_size))) { + crash("send_icmp_ping(): failed to malloc %d bytes for send buffer", + icmp_pkt_size); + return -1; /* might be reached if we're in debug mode */ + } + + memset(packet.buf, 0, icmp_pkt_size); + /* if we can't listen or don't have anything to listen to, just return */ - if(!t || !icmp_pkts_en_route) return 0; + if(!t || !icmp_pkts_en_route) { + free(packet.buf); + return 0; + } gettimeofday(&wait_start, &tz); @@ -735,7 +800,7 @@ wait_for_reply(int sock, u_int t) /* reap responses until we hit a timeout */ n = recvfrom_wto(sock, buf, sizeof(buf), - (struct sockaddr *)&resp_addr, &t, &now); + (struct sockaddr *)&resp_addr, &t, &now); if(!n) { if(debug > 1) { printf("recvfrom_wto() timed out during a %u usecs wait\n", @@ -745,12 +810,23 @@ wait_for_reply(int sock, u_int t) } if(n < 0) { if(debug) printf("recvfrom_wto() returned errors\n"); + free(packet.buf); return n; } - ip = (struct ip *)buf; - if(debug > 1) printf("received %u bytes from %s\n", - ntohs(ip->ip_len), inet_ntoa(resp_addr.sin_addr)); + // FIXME: with ipv6 we don't have an ip header here + if (address_family != AF_INET6) { + ip = (union ip_hdr *)buf; + + if(debug > 1) { + char address[INET6_ADDRSTRLEN]; + parse_address(&resp_addr, address, sizeof(address)); + printf("received %u bytes from %s\n", + address_family == AF_INET6 ? ntohs(ip->ip6.ip6_plen) + : ntohs(ip->ip.ip_len), + address); + } + } /* obsolete. 
alpha on tru64 provides the necessary defines, but isn't broken */ /* #if defined( __alpha__ ) && __STDC__ && !defined( __GLIBC__ ) */ @@ -759,12 +835,14 @@ wait_for_reply(int sock, u_int t) * off the bottom 4 bits */ /* hlen = (ip->ip_vhl & 0x0f) << 2; */ /* #else */ - hlen = ip->ip_hl << 2; + hlen = (address_family == AF_INET6) ? 0 : ip->ip.ip_hl << 2; /* #endif */ if(n < (hlen + ICMP_MINLEN)) { + char address[INET6_ADDRSTRLEN]; + parse_address(&resp_addr, address, sizeof(address)); crash("received packet too short for ICMP (%d bytes, expected %d) from %s\n", - n, hlen + icmp_pkt_size, inet_ntoa(resp_addr.sin_addr)); + n, hlen + icmp_pkt_size, address); } /* else if(debug) { */ /* printf("ip header size: %u, packet size: %u (expected %u, %u)\n", */ @@ -773,23 +851,39 @@ wait_for_reply(int sock, u_int t) /* } */ /* check the response */ - memcpy(&icp, buf + hlen, sizeof(icp)); - if(ntohs(icp.icmp_id) != pid || icp.icmp_type != ICMP_ECHOREPLY || - ntohs(icp.icmp_seq) >= targets*packets) { + memcpy(packet.buf, buf + hlen, icmp_pkt_size); +/* address_family == AF_INET6 ? sizeof(struct icmp6_hdr) + : sizeof(struct icmp));*/ + + if( (address_family == PF_INET && + (ntohs(packet.icp->icmp_id) != pid || packet.icp->icmp_type != ICMP_ECHOREPLY + || ntohs(packet.icp->icmp_seq) >= targets * packets)) + || (address_family == PF_INET6 && + (ntohs(packet.icp6->icmp6_id) != pid || packet.icp6->icmp6_type != ICMP6_ECHO_REPLY + || ntohs(packet.icp6->icmp6_seq) >= targets * packets))) { if(debug > 2) printf("not a proper ICMP_ECHOREPLY\n"); handle_random_icmp(buf + hlen, &resp_addr); continue; } /* this is indeed a valid response */ - memcpy(&data, icp.icmp_data, sizeof(data)); - if (debug > 2) - printf("ICMP echo-reply of len %lu, id %u, seq %u, cksum 0x%X\n", - (unsigned long)sizeof(data), ntohs(icp.icmp_id), - ntohs(icp.icmp_seq), icp.icmp_cksum); + if (address_family == PF_INET) { + memcpy(&data, packet.icp->icmp_data, sizeof(data)); + if (debug > 2) + printf("ICMP echo-reply of len %lu, id %u, seq %u, cksum 0x%X\n", + (unsigned long)sizeof(data), ntohs(packet.icp->icmp_id), + ntohs(packet.icp->icmp_seq), packet.icp->icmp_cksum); + host = table[ntohs(packet.icp->icmp_seq)/packets]; + } else { + memcpy(&data, &packet.icp6->icmp6_dataun.icmp6_un_data8[4], sizeof(data)); + if (debug > 2) + printf("ICMP echo-reply of len %lu, id %u, seq %u, cksum 0x%X\n", + (unsigned long)sizeof(data), ntohs(packet.icp6->icmp6_id), + ntohs(packet.icp6->icmp6_seq), packet.icp6->icmp6_cksum); + host = table[ntohs(packet.icp6->icmp6_seq)/packets]; + } - host = table[ntohs(icp.icmp_seq)/packets]; tdiff = get_timevaldiff(&data.stime, &now); host->time_waited += tdiff; @@ -801,22 +895,25 @@ wait_for_reply(int sock, u_int t) host->rtmin = tdiff; if(debug) { + char address[INET6_ADDRSTRLEN]; + parse_address(&resp_addr, address, sizeof(address)); printf("%0.3f ms rtt from %s, outgoing ttl: %u, incoming ttl: %u, max: %0.3f, min: %0.3f\n", - (float)tdiff / 1000, inet_ntoa(resp_addr.sin_addr), - ttl, ip->ip_ttl, (float)host->rtmax / 1000, (float)host->rtmin / 1000); + (float)tdiff / 1000, address, + ttl, ip->ip.ip_ttl, (float)host->rtmax / 1000, (float)host->rtmin / 1000); } /* if we're in hostcheck mode, exit with limited printouts */ if(mode == MODE_HOSTCHECK) { printf("OK - %s responds to ICMP. 
Packet %u, rta %0.3fms|" - "pkt=%u;;0;%u rta=%0.3f;%0.3f;%0.3f;;\n", - host->name, icmp_recv, (float)tdiff / 1000, - icmp_recv, packets, (float)tdiff / 1000, - (float)warn.rta / 1000, (float)crit.rta / 1000); + "pkt=%u;;0;%u rta=%0.3f;%0.3f;%0.3f;;\n", + host->name, icmp_recv, (float)tdiff / 1000, + icmp_recv, packets, (float)tdiff / 1000, + (float)warn.rta / 1000, (float)crit.rta / 1000); exit(STATE_OK); } } + free(packet.buf); return 0; } @@ -824,62 +921,81 @@ wait_for_reply(int sock, u_int t) static int send_icmp_ping(int sock, struct rta_host *host) { - static union { - void *buf; /* re-use so we prevent leaks */ - struct icmp *icp; - u_short *cksum_in; - } packet = { NULL }; long int len; struct icmp_ping_data data; struct msghdr hdr; struct iovec iov; struct timeval tv; - struct sockaddr *addr; + void *buf = NULL; if(sock == -1) { errno = 0; crash("Attempt to send on bogus socket"); return -1; } - addr = (struct sockaddr *)&host->saddr_in; - if(!packet.buf) { - if (!(packet.buf = malloc(icmp_pkt_size))) { + if(!buf) { + if (!(buf = malloc(icmp_pkt_size))) { crash("send_icmp_ping(): failed to malloc %d bytes for send buffer", icmp_pkt_size); return -1; /* might be reached if we're in debug mode */ } } - memset(packet.buf, 0, icmp_pkt_size); + memset(buf, 0, icmp_pkt_size); - if((gettimeofday(&tv, &tz)) == -1) return -1; + if((gettimeofday(&tv, &tz)) == -1) { + free(buf); + return -1; + } data.ping_id = 10; /* host->icmp.icmp_sent; */ memcpy(&data.stime, &tv, sizeof(tv)); - memcpy(&packet.icp->icmp_data, &data, sizeof(data)); - packet.icp->icmp_type = ICMP_ECHO; - packet.icp->icmp_code = 0; - packet.icp->icmp_cksum = 0; - packet.icp->icmp_id = htons(pid); - packet.icp->icmp_seq = htons(host->id++); - packet.icp->icmp_cksum = icmp_checksum(packet.cksum_in, icmp_pkt_size); - - if (debug > 2) - printf("Sending ICMP echo-request of len %lu, id %u, seq %u, cksum 0x%X to host %s\n", - (unsigned long)sizeof(data), ntohs(packet.icp->icmp_id), - ntohs(packet.icp->icmp_seq), packet.icp->icmp_cksum, - host->name); + + if (address_family == AF_INET) { + struct icmp *icp = (struct icmp*)buf; + + memcpy(&icp->icmp_data, &data, sizeof(data)); + + icp->icmp_type = ICMP_ECHO; + icp->icmp_code = 0; + icp->icmp_cksum = 0; + icp->icmp_id = htons(pid); + icp->icmp_seq = htons(host->id++); + icp->icmp_cksum = icmp_checksum((unsigned short*)buf, icmp_pkt_size); + + if (debug > 2) + printf("Sending ICMP echo-request of len %lu, id %u, seq %u, cksum 0x%X to host %s\n", + (unsigned long)sizeof(data), ntohs(icp->icmp_id), ntohs(icp->icmp_seq), icp->icmp_cksum, host->name); + } + else { + struct icmp6_hdr *icp6 = (struct icmp6_hdr*)buf; + memcpy(&icp6->icmp6_dataun.icmp6_un_data8[4], &data, sizeof(data)); + icp6->icmp6_type = ICMP6_ECHO_REQUEST; + icp6->icmp6_code = 0; + icp6->icmp6_cksum = 0; + icp6->icmp6_id = htons(pid); + icp6->icmp6_seq = htons(host->id++); + // let checksum be calculated automatically + + if (debug > 2) { + printf("Sending ICMP echo-request of len %lu, id %u, seq %u, cksum 0x%X to host %s\n", + (unsigned long)sizeof(data), ntohs(icp6->icmp6_id), + ntohs(icp6->icmp6_seq), icp6->icmp6_cksum, host->name); + } + } memset(&iov, 0, sizeof(iov)); - iov.iov_base = packet.buf; + iov.iov_base = buf; iov.iov_len = icmp_pkt_size; memset(&hdr, 0, sizeof(hdr)); - hdr.msg_name = addr; - hdr.msg_namelen = sizeof(struct sockaddr); + hdr.msg_name = (struct sockaddr *)&host->saddr_in; + hdr.msg_namelen = sizeof(struct sockaddr_storage); hdr.msg_iov = &iov; hdr.msg_iovlen = 1; + errno = 0; + /* MSG_CONFIRM is 
a linux thing and only available on linux kernels >= 2.3.15, see send(2) */ #ifdef MSG_CONFIRM len = sendmsg(sock, &hdr, MSG_CONFIRM); @@ -887,9 +1003,15 @@ send_icmp_ping(int sock, struct rta_host *host) len = sendmsg(sock, &hdr, 0); #endif + free(buf); + if(len < 0 || (unsigned int)len != icmp_pkt_size) { - if(debug) printf("Failed to send ping to %s\n", - inet_ntoa(host->saddr_in.sin_addr)); + if(debug) { + char address[INET6_ADDRSTRLEN]; + parse_address((struct sockaddr_storage *)&host->saddr_in, address, sizeof(address)); + printf("Failed to send ping to %s: %s\n", address, strerror(errno)); + } + errno = 0; return -1; } @@ -934,7 +1056,7 @@ recvfrom_wto(int sock, void *buf, unsigned int len, struct sockaddr *saddr, if(!n) return 0; /* timeout */ - slen = sizeof(struct sockaddr); + slen = sizeof(struct sockaddr_storage); memset(&iov, 0, sizeof(iov)); iov.iov_base = buf; @@ -958,6 +1080,7 @@ recvfrom_wto(int sock, void *buf, unsigned int len, struct sockaddr *saddr, break ; } } + if (!chdr) #endif // SO_TIMESTAMP gettimeofday(tv, &tz); @@ -991,6 +1114,7 @@ finish(int sig) /* iterate thrice to calculate values, give output, and print perfparse */ host = list; + while(host) { if(!host->icmp_recv) { /* rta 0 is ofcourse not entirely correct, but will still show up @@ -1039,10 +1163,12 @@ finish(int sig) if(!host->icmp_recv) { status = STATE_CRITICAL; if(host->flags & FLAG_LOST_CAUSE) { + char address[INET6_ADDRSTRLEN]; + parse_address(&host->error_addr, address, sizeof(address)); printf("%s: %s @ %s. rta nan, lost %d%%", host->name, get_icmp_error_msg(host->icmp_type, host->icmp_code), - inet_ntoa(host->error_addr), + address, 100); } else { /* not marked as lost cause, so we have no flags for it */ @@ -1104,7 +1230,6 @@ get_timevaldiff(struct timeval *early, struct timeval *later) { return 0; } - ret = (later->tv_sec - early->tv_sec) * 1000000; ret += later->tv_usec - early->tv_usec; @@ -1112,18 +1237,35 @@ get_timevaldiff(struct timeval *early, struct timeval *later) } static int -add_target_ip(char *arg, struct in_addr *in) +add_target_ip(char *arg, struct sockaddr_storage *in) { struct rta_host *host; + struct sockaddr_in *sin, *host_sin; + struct sockaddr_in6 *sin6, *host_sin6; - /* disregard obviously stupid addresses */ - if(in->s_addr == INADDR_NONE || in->s_addr == INADDR_ANY) + if (address_family == AF_INET) + sin = (struct sockaddr_in *)in; + else + sin6 = (struct sockaddr_in6 *)in; + + + + /* disregard obviously stupid addresses + * (I didn't find an ipv6 equivalent to INADDR_NONE) */ + if (((address_family == AF_INET && (sin->sin_addr.s_addr == INADDR_NONE + || sin->sin_addr.s_addr == INADDR_ANY))) + || (address_family == AF_INET6 && (sin6->sin6_addr.s6_addr == in6addr_any.s6_addr))) { return -1; + } /* no point in adding two identical IP's, so don't. ;) */ host = list; while(host) { - if(host->saddr_in.sin_addr.s_addr == in->s_addr) { + host_sin = (struct sockaddr_in *)&host->saddr_in; + host_sin6 = (struct sockaddr_in6 *)&host->saddr_in; + + if( (address_family == AF_INET && host_sin->sin_addr.s_addr == sin->sin_addr.s_addr) + || (address_family == AF_INET6 && host_sin6->sin6_addr.s6_addr == sin6->sin6_addr.s6_addr)) { if(debug) printf("Identical IP already exists. 
Not adding %s\n", arg); return -1; } @@ -1131,19 +1273,29 @@ add_target_ip(char *arg, struct in_addr *in) } /* add the fresh ip */ - host = malloc(sizeof(struct rta_host)); + host = (struct rta_host*)malloc(sizeof(struct rta_host)); if(!host) { + char straddr[INET6_ADDRSTRLEN]; + parse_address((struct sockaddr_storage*)&in, straddr, sizeof(straddr)); crash("add_target_ip(%s, %s): malloc(%d) failed", - arg, inet_ntoa(*in), sizeof(struct rta_host)); + arg, straddr, sizeof(struct rta_host)); } memset(host, 0, sizeof(struct rta_host)); /* set the values. use calling name for output */ host->name = strdup(arg); - /* fill out the sockaddr_in struct */ - host->saddr_in.sin_family = AF_INET; - host->saddr_in.sin_addr.s_addr = in->s_addr; + /* fill out the sockaddr_storage struct */ + if(address_family == AF_INET) { + host_sin = (struct sockaddr_in *)&host->saddr_in; + host_sin->sin_family = AF_INET; + host_sin->sin_addr.s_addr = sin->sin_addr.s_addr; + } + else { + host_sin6 = (struct sockaddr_in6 *)&host->saddr_in; + host_sin6->sin6_family = AF_INET6; + memcpy(host_sin6->sin6_addr.s6_addr, sin6->sin6_addr.s6_addr, sizeof host_sin6->sin6_addr.s6_addr); + } host->rtmin = DBL_MAX; @@ -1160,31 +1312,45 @@ add_target_ip(char *arg, struct in_addr *in) static int add_target(char *arg) { - int i; - struct hostent *he; - struct in_addr *in, ip; + int error, result; + struct sockaddr_storage ip; + struct addrinfo hints, *res, *p; + struct sockaddr_in *sin; + struct sockaddr_in6 *sin6; + + switch (address_family) { + case AF_INET: + sin = (struct sockaddr_in *)&ip; + result = inet_pton(address_family, arg, &sin->sin_addr); + break; + case AF_INET6: + sin6 = (struct sockaddr_in6 *)&ip; + result = inet_pton(address_family, arg, &sin6->sin6_addr); + break; + default: crash("Address family not supported"); + } /* don't resolve if we don't have to */ - if((ip.s_addr = inet_addr(arg)) != INADDR_NONE) { + if(result == 1) { /* don't add all ip's if we were given a specific one */ return add_target_ip(arg, &ip); - /* he = gethostbyaddr((char *)in, sizeof(struct in_addr), AF_INET); */ - /* if(!he) return add_target_ip(arg, in); */ } else { errno = 0; - he = gethostbyname(arg); - if(!he) { + memset(&hints, 0, sizeof(hints)); + hints.ai_family = address_family == AF_INET ? 
PF_INET : PF_INET6; + hints.ai_socktype = SOCK_RAW; + if((error = getaddrinfo(arg, NULL, &hints, &res)) != 0) { errno = 0; - crash("Failed to resolve %s", arg); + crash("Failed to resolve %s: %s", arg, gai_strerror(error)); return -1; } } /* possibly add all the IP's as targets */ - for(i = 0; he->h_addr_list[i]; i++) { - in = (struct in_addr *)he->h_addr_list[i]; - add_target_ip(arg, in); + for(p = res; p != NULL; p = p->ai_next) { + memcpy(&ip, p->ai_addr, p->ai_addrlen); + add_target_ip(arg, &ip); /* this is silly, but it works */ if(mode == MODE_HOSTCHECK || mode == MODE_ALL) { @@ -1193,6 +1359,7 @@ add_target(char *arg) } break; } + freeaddrinfo(res); return 0; } @@ -1203,7 +1370,7 @@ set_source_ip(char *arg) struct sockaddr_in src; memset(&src, 0, sizeof(src)); - src.sin_family = AF_INET; + src.sin_family = address_family; if((src.sin_addr.s_addr = inet_addr(arg)) == INADDR_NONE) src.sin_addr.s_addr = get_ip_address(arg); if(bind(icmp_sock, (struct sockaddr *)&src, sizeof(src)) == -1) @@ -1311,12 +1478,12 @@ get_threshold(char *str, threshold *th) unsigned short icmp_checksum(unsigned short *p, int n) { - register unsigned short cksum; - register long sum = 0; + unsigned short cksum; + long sum = 0; - while(n > 1) { + while(n > 2) { sum += *p++; - n -= 2; + n -= sizeof(unsigned short); } /* mop up the occasional odd byte */ @@ -1347,6 +1514,8 @@ print_help(void) printf (" %s\n", "-H"); printf (" %s\n", _("specify a target")); + printf (" %s\n", "[-4|-6]"); + printf (" %s\n", _("Use IPv4 (default) or IPv6 to communicate with the targets")); printf (" %s\n", "-w"); printf (" %s", _("warning threshold (currently ")); printf ("%0.3fms,%u%%)\n", (float)warn.rta / 1000, warn.pl); -- cgit v0.10-9-g596f From 248bebb037b4ac3a184706ec715f82a6ed4749a5 Mon Sep 17 00:00:00 2001 From: Lars Michelsen Date: Thu, 22 Oct 2015 16:40:02 +0200 Subject: Fixed parameter handling after 01efbb2183d49c5082598d4799788fc385342f28 diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index 6aeedf4..4c17d23 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -422,6 +422,44 @@ main(int argc, char **argv) address_family = AF_INET; int icmp_proto = IPPROTO_ICMP; + /* get calling name the old-fashioned way for portability instead + * of relying on the glibc-ism __progname */ + ptr = strrchr(argv[0], '/'); + if(ptr) progname = &ptr[1]; + else progname = argv[0]; + + /* now set defaults. 
Use progname to set them initially (allows for + * superfast check_host program when target host is up */ + cursor = list = NULL; + table = NULL; + + mode = MODE_RTA; + crit.rta = 500000; + crit.pl = 80; + warn.rta = 200000; + warn.pl = 40; + protocols = HAVE_ICMP | HAVE_UDP | HAVE_TCP; + pkt_interval = 80000; /* 80 msec packet interval by default */ + packets = 5; + + if(!strcmp(progname, "check_icmp") || !strcmp(progname, "check_ping")) { + mode = MODE_ICMP; + protocols = HAVE_ICMP; + } + else if(!strcmp(progname, "check_host")) { + mode = MODE_HOSTCHECK; + pkt_interval = 1000000; + packets = 5; + crit.rta = warn.rta = 1000000; + crit.pl = warn.pl = 100; + } + else if(!strcmp(progname, "check_rta_multi")) { + mode = MODE_ALL; + target_interval = 0; + pkt_interval = 50000; + packets = 5; + } + /* parse the arguments */ for(i = 1; i < argc; i++) { while((arg = getopt(argc, argv, "vhVw:c:n:p:t:H:s:i:b:I:l:m:64")) != EOF) { @@ -533,44 +571,6 @@ main(int argc, char **argv) pid = getpid() & 0xffff; /* printf("pid = %u\n", pid); */ - /* get calling name the old-fashioned way for portability instead - * of relying on the glibc-ism __progname */ - ptr = strrchr(argv[0], '/'); - if(ptr) progname = &ptr[1]; - else progname = argv[0]; - - /* now set defaults. Use progname to set them initially (allows for - * superfast check_host program when target host is up */ - cursor = list = NULL; - table = NULL; - - mode = MODE_RTA; - crit.rta = 500000; - crit.pl = 80; - warn.rta = 200000; - warn.pl = 40; - protocols = HAVE_ICMP | HAVE_UDP | HAVE_TCP; - pkt_interval = 80000; /* 80 msec packet interval by default */ - packets = 5; - - if(!strcmp(progname, "check_icmp") || !strcmp(progname, "check_ping")) { - mode = MODE_ICMP; - protocols = HAVE_ICMP; - } - else if(!strcmp(progname, "check_host")) { - mode = MODE_HOSTCHECK; - pkt_interval = 1000000; - packets = 5; - crit.rta = warn.rta = 1000000; - crit.pl = warn.pl = 100; - } - else if(!strcmp(progname, "check_rta_multi")) { - mode = MODE_ALL; - target_interval = 0; - pkt_interval = 50000; - packets = 5; - } - /* Parse extra opts if any */ argv=np_extra_opts(&argc, argv, progname); -- cgit v0.10-9-g596f From 8edac9421f8ce28ab51917de4e056ac609eadd49 Mon Sep 17 00:00:00 2001 From: Jacob Hansen Date: Thu, 29 Nov 2018 16:02:10 +0100 Subject: check_icmp: Automatically detect IP protocol This patch automatically detects whether the protocol version is IPv4 or IPv6 All credits to: https://github.com/ghciv6 Signed-off-by: Jacob Hansen diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index 4c17d23..cab69fd 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -419,7 +419,7 @@ main(int argc, char **argv) * that before pointer magic (esp. 
on network data) */ icmp_sockerrno = udp_sockerrno = tcp_sockerrno = sockets = 0; - address_family = AF_INET; + address_family = -1; int icmp_proto = IPPROTO_ICMP; /* get calling name the old-fashioned way for portability instead @@ -526,12 +526,10 @@ main(int argc, char **argv) break; case '4': address_family = AF_INET; - icmp_proto = IPPROTO_ICMP; break; case '6': #ifdef USE_IPV6 address_family = AF_INET6; - icmp_proto = IPPROTO_ICMPV6; #else usage (_("IPv6 support not available\n")); #endif @@ -540,29 +538,6 @@ main(int argc, char **argv) } } - if((icmp_sock = socket(address_family, SOCK_RAW, icmp_proto)) != -1) - sockets |= HAVE_ICMP; - else icmp_sockerrno = errno; - - /* if((udp_sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP)) != -1) */ - /* sockets |= HAVE_UDP; */ - /* else udp_sockerrno = errno; */ - - /* if((tcp_sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) != -1) */ - /* sockets |= HAVE_TCP; */ - /* else tcp_sockerrno = errno; */ - - /* now drop privileges (no effect if not setsuid or geteuid() == 0) */ - if (setuid(getuid()) == -1) { - printf("ERROR: Failed to drop privileges\n"); - return 1; - } - -#ifdef SO_TIMESTAMP - if(setsockopt(icmp_sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on))) - if(debug) printf("Warning: no SO_TIMESTAMP support\n"); -#endif // SO_TIMESTAMP - /* POSIXLY_CORRECT might break things, so unset it (the portable way) */ environ = NULL; @@ -593,6 +568,37 @@ main(int argc, char **argv) exit(3); } + // add_target might change address_family + switch ( address_family ){ + case AF_INET: icmp_proto = IPPROTO_ICMP; + break; + case AF_INET6: icmp_proto = IPPROTO_ICMPV6; + break; + default: crash("Address family not supported"); + } + if((icmp_sock = socket(address_family, SOCK_RAW, icmp_proto)) != -1) + sockets |= HAVE_ICMP; + else icmp_sockerrno = errno; + + /* if((udp_sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP)) != -1) */ + /* sockets |= HAVE_UDP; */ + /* else udp_sockerrno = errno; */ + + /* if((tcp_sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) != -1) */ + /* sockets |= HAVE_TCP; */ + /* else tcp_sockerrno = errno; */ + +#ifdef SO_TIMESTAMP + if(setsockopt(icmp_sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on))) + if(debug) printf("Warning: no SO_TIMESTAMP support\n"); +#endif // SO_TIMESTAMP + + /* now drop privileges (no effect if not setsuid or geteuid() == 0) */ + if (setuid(getuid()) == -1) { + printf("ERROR: Failed to drop privileges\n"); + return 1; + } + if(!sockets) { if(icmp_sock == -1) { errno = icmp_sockerrno; @@ -1319,6 +1325,19 @@ add_target(char *arg) struct sockaddr_in6 *sin6; switch (address_family) { + case -1: + // -4 and -6 are not specified on cmdline + address_family = AF_INET; + sin = (struct sockaddr_in *)&ip; + result = inet_pton(address_family, arg, &sin->sin_addr); +#ifdef USE_IPV6 + if( result != 1 ){ + address_family = AF_INET6; + sin6 = (struct sockaddr_in6 *)&ip; + result = inet_pton(address_family, arg, &sin6->sin6_addr); + } +#endif + break; case AF_INET: sin = (struct sockaddr_in *)&ip; result = inet_pton(address_family, arg, &sin->sin_addr); -- cgit v0.10-9-g596f From e3ade3374a99155c8c70d89a8d8116240d5c8df0 Mon Sep 17 00:00:00 2001 From: Jacob Hansen Date: Mon, 3 Dec 2018 14:19:27 +0000 Subject: check_icmp: process protocol version args first Detection of protocol version is in the previous patch implemented in the add_target() function, which is called when adding the -H command line argument. 
That means that if a protocal version argument (-4, -6) is added after the -H then the protocol version might be incorrectly set. This patch ensures that we first process the protocol version arguments, and then we process the rest of the arguments. Signed-off-by: Jacob Hansen diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index cab69fd..b978ee1 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -460,6 +460,28 @@ main(int argc, char **argv) packets = 5; } + /* Parse protocol arguments first */ + for(i = 1; i < argc; i++) { + while((arg = getopt(argc, argv, "vhVw:c:n:p:t:H:s:i:b:I:l:m:64")) != EOF) { + unsigned short size; + switch(arg) { + case '4': + address_family = AF_INET; + break; + case '6': +#ifdef USE_IPV6 + address_family = AF_INET6; +#else + usage (_("IPv6 support not available\n")); +#endif + break; + } + } + } + + /* Reset argument scanning */ + optind = 1; + /* parse the arguments */ for(i = 1; i < argc; i++) { while((arg = getopt(argc, argv, "vhVw:c:n:p:t:H:s:i:b:I:l:m:64")) != EOF) { @@ -524,16 +546,6 @@ main(int argc, char **argv) print_help (); exit (STATE_UNKNOWN); break; - case '4': - address_family = AF_INET; - break; - case '6': -#ifdef USE_IPV6 - address_family = AF_INET6; -#else - usage (_("IPv6 support not available\n")); -#endif - break; } } } -- cgit v0.10-9-g596f From e5eccb663efb40c010ed608f5900b341592cfa68 Mon Sep 17 00:00:00 2001 From: Jacob Hansen Date: Mon, 3 Dec 2018 14:23:58 +0000 Subject: check_icmp: removed outcommented code Signed-off-by: Jacob Hansen diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index b978ee1..749e2d4 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -592,13 +592,6 @@ main(int argc, char **argv) sockets |= HAVE_ICMP; else icmp_sockerrno = errno; - /* if((udp_sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP)) != -1) */ - /* sockets |= HAVE_UDP; */ - /* else udp_sockerrno = errno; */ - - /* if((tcp_sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) != -1) */ - /* sockets |= HAVE_TCP; */ - /* else tcp_sockerrno = errno; */ #ifdef SO_TIMESTAMP if(setsockopt(icmp_sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on))) -- cgit v0.10-9-g596f From 0882b4201bfa0608b90448c4866b571a20e1fd66 Mon Sep 17 00:00:00 2001 From: Jacob Hansen Date: Thu, 6 Dec 2018 15:33:34 +0000 Subject: check_icmp: Correctly set address_family on lookup If a hostname is supplied instead of an IP address, the automatic address family detection would fail to correctly set the IP protocol version (it would always be IPv6). We now supply AF_UNSPEC to getaddrinfo, which should then return the correct address family in the result. Signed-off-by: Jacob Hansen diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index 749e2d4..1a2a177 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -1362,13 +1362,14 @@ add_target(char *arg) else { errno = 0; memset(&hints, 0, sizeof(hints)); - hints.ai_family = address_family == AF_INET ? 
PF_INET : PF_INET6; + hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_RAW; if((error = getaddrinfo(arg, NULL, &hints, &res)) != 0) { errno = 0; crash("Failed to resolve %s: %s", arg, gai_strerror(error)); return -1; } + address_family = res->ai_family; } /* possibly add all the IP's as targets */ -- cgit v0.10-9-g596f From 270e7cba3830328424e3f2b6b9a6989b1ea5f89b Mon Sep 17 00:00:00 2001 From: Vincent Danjean Date: Sat, 8 Dec 2018 21:51:05 +0100 Subject: [check_disk] add support to display inodes usage in perfdata This is not enabled by default It can be enabled with the -P (--iperfdata) option diff --git a/gl/fsusage.c b/gl/fsusage.c index 0657555..6103ecf 100644 --- a/gl/fsusage.c +++ b/gl/fsusage.c @@ -143,6 +143,7 @@ get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp) fsp->fsu_bavail_top_bit_set = EXTRACT_TOP_BIT (vfsd.f_bavail) != 0; fsp->fsu_files = PROPAGATE_ALL_ONES (vfsd.f_files); fsp->fsu_ffree = PROPAGATE_ALL_ONES (vfsd.f_ffree); + fsp->fsu_favail = PROPAGATE_ALL_ONES (vfsd.f_favail); return 0; } @@ -174,6 +175,7 @@ get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp) fsp->fsu_bavail_top_bit_set = EXTRACT_TOP_BIT (fsd.fd_req.bfreen) != 0; fsp->fsu_files = PROPAGATE_ALL_ONES (fsd.fd_req.gtot); fsp->fsu_ffree = PROPAGATE_ALL_ONES (fsd.fd_req.gfree); + fsp->fsu_favail = PROPAGATE_ALL_ONES (fsd.fd_req.gfree); #elif defined STAT_READ_FILSYS /* SVR2 */ # ifndef SUPERBOFF @@ -209,6 +211,7 @@ get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp) ? UINTMAX_MAX : (fsd.s_isize - 2) * INOPB * (fsd.s_type == Fs2b ? 2 : 1)); fsp->fsu_ffree = PROPAGATE_ALL_ONES (fsd.s_tinode); + fsp->fsu_favail = PROPAGATE_ALL_ONES (fsd.s_tinode); #elif defined STAT_STATFS3_OSF1 /* OSF/1 */ @@ -296,6 +299,7 @@ get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp) fsp->fsu_bavail_top_bit_set = EXTRACT_TOP_BIT (fsd.f_bavail) != 0; fsp->fsu_files = PROPAGATE_ALL_ONES (fsd.f_files); fsp->fsu_ffree = PROPAGATE_ALL_ONES (fsd.f_ffree); + fsp->fsu_favail = PROPAGATE_ALL_ONES (fsd.f_ffree); #endif @@ -323,6 +327,7 @@ statfs (char *file, struct statfs *fsb) fsb->f_bavail = fsd.du_tfree; fsb->f_files = (fsd.du_isize - 2) * fsd.du_inopb; fsb->f_ffree = fsd.du_tinode; + fsb->f_favail = fsd.du_tinode; fsb->f_fsid.val[0] = fsd.du_site; fsb->f_fsid.val[1] = fsd.du_pckno; return 0; diff --git a/gl/fsusage.h b/gl/fsusage.h index 7810fc0..e2654fd 100644 --- a/gl/fsusage.h +++ b/gl/fsusage.h @@ -32,7 +32,8 @@ struct fs_usage uintmax_t fsu_bavail; /* Free blocks available to non-superuser. */ bool fsu_bavail_top_bit_set; /* 1 if fsu_bavail represents a value < 0. */ uintmax_t fsu_files; /* Total file nodes. */ - uintmax_t fsu_ffree; /* Free file nodes. */ + uintmax_t fsu_ffree; /* Free file nodes to superuser. */ + uintmax_t fsu_favail; /* Free file nodes to non-superuser. 
*/ }; int get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp); diff --git a/lib/utils_disk.c b/lib/utils_disk.c index efe35fc..c7c9126 100644 --- a/lib/utils_disk.c +++ b/lib/utils_disk.c @@ -69,6 +69,8 @@ np_add_parameter(struct parameter_list **list, const char *name) new_path->dtotal_units = 0; new_path->inodes_total = 0; new_path->inodes_free = 0; + new_path->inodes_free_to_root = 0; + new_path->inodes_used = 0; new_path->dused_inodes_percent = 0; new_path->dfree_inodes_percent = 0; diff --git a/lib/utils_disk.h b/lib/utils_disk.h index 83a3763..999270c 100644 --- a/lib/utils_disk.h +++ b/lib/utils_disk.h @@ -24,7 +24,8 @@ struct parameter_list char *group; struct mount_entry *best_match; struct parameter_list *name_next; - uintmax_t total, available, available_to_root, used, inodes_free, inodes_total; + uintmax_t total, available, available_to_root, used, + inodes_free, inodes_free_to_root, inodes_used, inodes_total; double dfree_pct, dused_pct; double dused_units, dfree_units, dtotal_units; double dused_inodes_percent, dfree_inodes_percent; diff --git a/plugins/check_disk.c b/plugins/check_disk.c index e73a008..1c43e85 100644 --- a/plugins/check_disk.c +++ b/plugins/check_disk.c @@ -141,6 +141,7 @@ int erronly = FALSE; int display_mntp = FALSE; int exact_match = FALSE; int freespace_ignore_reserved = FALSE; +int display_inodes_perfdata = FALSE; char *warn_freespace_units = NULL; char *crit_freespace_units = NULL; char *warn_freespace_percent = NULL; @@ -167,6 +168,7 @@ main (int argc, char **argv) char *output; char *details; char *perf; + char *perf_ilabel; char *preamble; char *flag_header; double inode_space_pct; @@ -186,6 +188,7 @@ main (int argc, char **argv) output = strdup (""); details = strdup (""); perf = strdup (""); + perf_ilabel = strdup (""); stat_buf = malloc(sizeof *stat_buf); setlocale (LC_ALL, ""); @@ -348,6 +351,29 @@ main (int argc, char **argv) TRUE, 0, TRUE, path->dtotal_units)); + if (display_inodes_perfdata) { + /* *_high_tide must be reinitialized at each run */ + warning_high_tide = UINT_MAX; + critical_high_tide = UINT_MAX; + + if (path->freeinodes_percent->warning != NULL) { + warning_high_tide = abs( min( (double) warning_high_tide, (double) (1.0 - path->freeinodes_percent->warning->end/100)*path->inodes_total )); + } + if (path->freeinodes_percent->critical != NULL) { + critical_high_tide = abs( min( (double) critical_high_tide, (double) (1.0 - path->freeinodes_percent->critical->end/100)*path->inodes_total )); + } + + xasprintf (&perf_ilabel, "%s (inodes)", (!strcmp(me->me_mountdir, "none") || display_mntp) ? me->me_devname : me->me_mountdir); + /* Nb: *_high_tide are unset when == UINT_MAX */ + xasprintf (&perf, "%s %s", perf, + perfdata (perf_ilabel, + path->inodes_used, "", + (warning_high_tide != UINT_MAX ? TRUE : FALSE), warning_high_tide, + (critical_high_tide != UINT_MAX ? 
TRUE : FALSE), critical_high_tide, + TRUE, 0, + TRUE, path->inodes_total)); + } + if (disk_result==STATE_OK && erronly && !verbose) continue; @@ -455,6 +481,7 @@ process_arguments (int argc, char **argv) {"ignore-eregi-partition", required_argument, 0, 'I'}, {"local", no_argument, 0, 'l'}, {"stat-remote-fs", no_argument, 0, 'L'}, + {"iperfdata", no_argument, 0, 'P'}, {"mountpoint", no_argument, 0, 'M'}, {"errors-only", no_argument, 0, 'e'}, {"exact-match", no_argument, 0, 'E'}, @@ -477,7 +504,7 @@ process_arguments (int argc, char **argv) strcpy (argv[c], "-t"); while (1) { - c = getopt_long (argc, argv, "+?VqhvefCt:c:w:K:W:u:p:x:X:N:mklLg:R:r:i:I:MEA", longopts, &option); + c = getopt_long (argc, argv, "+?VqhvefCt:c:w:K:W:u:p:x:X:N:mklLPg:R:r:i:I:MEA", longopts, &option); if (c == -1 || c == EOF) break; @@ -582,9 +609,13 @@ process_arguments (int argc, char **argv) break; case 'L': stat_remote_fs = 1; + /* fallthrough */ case 'l': show_local_fs = 1; break; + case 'P': + display_inodes_perfdata = 1; + break; case 'p': /* select path */ if (! (warn_freespace_units || crit_freespace_units || warn_freespace_percent || crit_freespace_percent || warn_usedspace_units || crit_usedspace_units || @@ -1012,6 +1043,8 @@ get_stats (struct parameter_list *p, struct fs_usage *fsp) { p->dtotal_units += p_list->dtotal_units; p->inodes_total += p_list->inodes_total; p->inodes_free += p_list->inodes_free; + p->inodes_free_to_root += p_list->inodes_free_to_root; + p->inodes_used += p_list->inodes_used; } first = 0; } @@ -1050,7 +1083,18 @@ get_path_stats (struct parameter_list *p, struct fs_usage *fsp) { p->dused_units = p->used*fsp->fsu_blocksize/mult; p->dfree_units = p->available*fsp->fsu_blocksize/mult; p->dtotal_units = p->total*fsp->fsu_blocksize/mult; - p->inodes_total = fsp->fsu_files; /* Total file nodes. */ - p->inodes_free = fsp->fsu_ffree; /* Free file nodes. */ + /* Free file nodes. Not sure the workaround is required, but in case...*/ + p->inodes_free = fsp->fsu_favail > fsp->fsu_ffree ? 0 : fsp->fsu_favail; + p->inodes_free_to_root = fsp->fsu_ffree; /* Free file nodes for root. */ + p->inodes_used = fsp->fsu_files - fsp->fsu_ffree; + if (freespace_ignore_reserved) { + /* option activated : we substract the root-reserved inodes from the total */ + /* not all OS report fsp->fsu_favail, only the ones with statvfs syscall */ + /* for others, fsp->fsu_ffree == fsp->fsu_favail */ + p->inodes_total = fsp->fsu_files - p->inodes_free_to_root + p->inodes_free; + } else { + /* default behaviour : take all the inodes into account */ + p->inodes_total = fsp->fsu_files; + } np_add_name(&seen, p->best_match->me_mountdir); } -- cgit v0.10-9-g596f From 7a660b3f018e0903e098cbd2e766a9af18f6723f Mon Sep 17 00:00:00 2001 From: Jacob Hansen Date: Mon, 10 Dec 2018 13:43:09 +0000 Subject: check_icmp: move opts string into a variable This commit moves the opts string into a variable as it is now used twice. 
Signed-off-by: Jacob Hansen diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index 1a2a177..6a883a8 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -410,6 +410,7 @@ main(int argc, char **argv) #ifdef SO_TIMESTAMP int on = 1; #endif + char * opts_str = "vhVw:c:n:p:t:H:s:i:b:I:l:m:64"; setlocale (LC_ALL, ""); bindtextdomain (PACKAGE, LOCALEDIR); @@ -462,7 +463,7 @@ main(int argc, char **argv) /* Parse protocol arguments first */ for(i = 1; i < argc; i++) { - while((arg = getopt(argc, argv, "vhVw:c:n:p:t:H:s:i:b:I:l:m:64")) != EOF) { + while((arg = getopt(argc, argv, opts_str)) != EOF) { unsigned short size; switch(arg) { case '4': @@ -484,7 +485,7 @@ main(int argc, char **argv) /* parse the arguments */ for(i = 1; i < argc; i++) { - while((arg = getopt(argc, argv, "vhVw:c:n:p:t:H:s:i:b:I:l:m:64")) != EOF) { + while((arg = getopt(argc, argv, opts_str)) != EOF) { unsigned short size; switch(arg) { case 'v': -- cgit v0.10-9-g596f From ca6efcd02b203e9e07b869af050c1b9849e04608 Mon Sep 17 00:00:00 2001 From: Jacob Hansen Date: Mon, 10 Dec 2018 13:49:13 +0000 Subject: check_icmp: emit error if multiple protocol version As we do not support checking both IPv4 and IPv6 hosts in one execution we emit an error if multiple protocol versions are defined in the cmd line args. Signed-off-by: Jacob Hansen diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index 6a883a8..98891f0 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -467,10 +467,14 @@ main(int argc, char **argv) unsigned short size; switch(arg) { case '4': + if (address_family != -1) + crash("Multiple protocol versions not supported"); address_family = AF_INET; break; case '6': #ifdef USE_IPV6 + if (address_family != -1) + crash("Multiple protocol versions not supported"); address_family = AF_INET6; #else usage (_("IPv6 support not available\n")); -- cgit v0.10-9-g596f From 4a4ef0d6898cd1590561cd7685d1b8b02b757823 Mon Sep 17 00:00:00 2001 From: Jacob Hansen Date: Mon, 10 Dec 2018 14:00:43 +0000 Subject: check_icmp: Do not overwrite -4,-6 on lookup In case we needed to do a lookup, we previously overwrote the address_family to IPv6, even if we supplied -4 as a cmd line argument. This commit should ensure the cmd line argument is always followed. Signed-off-by: Jacob Hansen diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c index 98891f0..e45fdf6 100644 --- a/plugins-root/check_icmp.c +++ b/plugins-root/check_icmp.c @@ -1336,7 +1336,7 @@ add_target(char *arg) switch (address_family) { case -1: - // -4 and -6 are not specified on cmdline + /* -4 and -6 are not specified on cmdline */ address_family = AF_INET; sin = (struct sockaddr_in *)&ip; result = inet_pton(address_family, arg, &sin->sin_addr); @@ -1347,6 +1347,10 @@ add_target(char *arg) result = inet_pton(address_family, arg, &sin6->sin6_addr); } #endif + /* If we don't find any valid addresses, we still don't know the address_family */ + if ( result != 1) { + address_family = -1; + } break; case AF_INET: sin = (struct sockaddr_in *)&ip; @@ -1367,7 +1371,11 @@ add_target(char *arg) else { errno = 0; memset(&hints, 0, sizeof(hints)); - hints.ai_family = AF_UNSPEC; + if (address_family == -1) { + hints.ai_family = AF_UNSPEC; + } else { + hints.ai_family = address_family == AF_INET ? 
PF_INET : PF_INET6; + } hints.ai_socktype = SOCK_RAW; if((error = getaddrinfo(arg, NULL, &hints, &res)) != 0) { errno = 0; -- cgit v0.10-9-g596f From f79f016238f436b94dfeb04c981de3a6d3e9d31c Mon Sep 17 00:00:00 2001 From: Kostyantyn Hushchyn Date: Tue, 7 Sep 2010 14:54:20 +0300 Subject: check_hpjd: Added -D option to disable warning on 'out of paper' Signed-off-by: Kostyantyn Hushchyn diff --git a/plugins/check_hpjd.c b/plugins/check_hpjd.c index f159f5a..6546556 100644 --- a/plugins/check_hpjd.c +++ b/plugins/check_hpjd.c @@ -67,6 +67,7 @@ void print_usage (void); char *community = NULL; char *address = NULL; char *port = NULL; +int check_paper_out = 1; int main (int argc, char **argv) @@ -240,7 +241,8 @@ main (int argc, char **argv) strcpy (errmsg, _("Paper Jam")); } else if (paper_out) { - result = STATE_WARNING; + if (check_paper_out) + result = STATE_WARNING; strcpy (errmsg, _("Out of Paper")); } else if (line_status == OFFLINE) { @@ -325,7 +327,7 @@ process_arguments (int argc, char **argv) while (1) { - c = getopt_long (argc, argv, "+hVH:C:p:", longopts, &option); + c = getopt_long (argc, argv, "+hVH:C:p:D", longopts, &option); if (c == -1 || c == EOF || c == 1) break; @@ -347,6 +349,8 @@ process_arguments (int argc, char **argv) usage2 (_("Port must be a positive short integer"), optarg); else port = atoi(optarg); + case 'D': /* disable paper out check*/ + check_paper_out = 0; break; case 'V': /* version */ print_revision (progname, NP_VERSION); @@ -420,6 +424,8 @@ print_help (void) printf (" %s", _("Specify the port to check ")); printf (_("(default=%s)"), DEFAULT_PORT); printf ("\n"); + printf (" %s\n", "-D"); + printf (" %s", _("Disable paper check ")); printf (UT_SUPPORT); } @@ -430,5 +436,5 @@ void print_usage (void) { printf ("%s\n", _("Usage:")); - printf ("%s -H host [-C community] [-p port]\n", progname); + printf ("%s -H host [-C community] [-p port] [-D]\n", progname); } -- cgit v0.10-9-g596f From 8520c643dd35bbeebbf36c7145d3f8c12dfaf70b Mon Sep 17 00:00:00 2001 From: Iustin Pop Date: Fri, 18 Jan 2019 22:52:54 +0100 Subject: Simply initializes n before it is used When SSL is enabled, n is assigned the size of the server's second EHLO response (I think in bytes), which will usually be significantly higher than the command passed. As such, no commands are executed and no responses are checked, which - silently - defeats the desired checks and results in a success value. diff --git a/plugins/check_smtp.c b/plugins/check_smtp.c index 0fcf4c6..d37c57c 100644 --- a/plugins/check_smtp.c +++ b/plugins/check_smtp.c @@ -293,6 +293,7 @@ main (int argc, char **argv) printf("%s", buffer); } + n = 0; while (n < ncommands) { xasprintf (&cmd_str, "%s%s", commands[n], "\r\n"); my_send(cmd_str, strlen(cmd_str)); -- cgit v0.10-9-g596f From 22f47fc0c51266bd25491da30fd165a845a1391b Mon Sep 17 00:00:00 2001 From: Sven Nierlein Date: Mon, 21 Jan 2019 17:41:02 +0100 Subject: tests: make check_snmp test more reliable The check_snmp rate tests depend on the exact amount of time spend between the plugin runs and will fail on busy machines, ex. the ci servers. Using faketime mitigates this issue and also removes all the sleeps. 
Signed-off-by: Sven Nierlein diff --git a/plugins/tests/check_snmp.t b/plugins/tests/check_snmp.t index 73a68b2..85d6bf5 100755 --- a/plugins/tests/check_snmp.t +++ b/plugins/tests/check_snmp.t @@ -7,6 +7,7 @@ use strict; use Test::More; use NPTest; use FindBin qw($Bin); +use POSIX qw/strftime/; my $tests = 67; # Check that all dependent modules are available @@ -37,6 +38,7 @@ if ($@) { my $port_snmp = 16100 + int(rand(100)); +my $faketime = -x '/usr/bin/faketime' ? 1 : 0; # Start up server my @pids; @@ -118,77 +120,81 @@ like($res->output, '/'.quotemeta('SNMP OK - And now have fun with with this: \"C "And now have fun with with this: \"C:\\\\\" because we\'re not done yet!"').'/m', "Attempt to confuse parser No.3"); -system("rm -f ".$ENV{'MP_STATE_PATH'}."/check_snmp/*"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" ); -is($res->return_code, 0, "Returns OK"); -is($res->output, "No previous data to calculate rate - assume okay"); +system("rm -f ".$ENV{'MP_STATE_PATH'}."/*/check_snmp/*"); -# Need to sleep, otherwise duration=0 -sleep 1; +# run rate checks with faketime. rate checks depend on the exact amount of time spend between the +# plugin runs which may fail on busy machines. +# using faketime removes this race condition and also saves all the sleeps in between. +SKIP: { + skip "No faketime binary found", 28 if !$faketime; -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" ); -is($res->return_code, 1, "WARNING - due to going above rate calculation" ); -is($res->output, "SNMP RATE WARNING - *666* | iso.3.6.1.4.1.8072.3.2.67.10=666;600 "); + my $ts = time(); + $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" ); + is($res->return_code, 0, "Returns OK"); + is($res->output, "No previous data to calculate rate - assume okay"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" ); -is($res->return_code, 3, "UNKNOWN - basically the divide by zero error" ); -is($res->output, "Time duration between plugin calls is invalid"); + # test rate 1 second later + $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" ); + is($res->return_code, 1, "WARNING - due to going above rate calculation" ); + is($res->output, "SNMP RATE WARNING - *666* | iso.3.6.1.4.1.8072.3.2.67.10=666;600 "); + # test rate with same time + $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" ); + is($res->return_code, 3, "UNKNOWN - basically the divide by zero error" ); + is($res->output, "Time duration between plugin calls is invalid"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" ); -is($res->return_code, 0, "OK for first call" ); -is($res->output, "No previous data to calculate rate - assume okay" ); -# Need to sleep, otherwise duration=0 -sleep 1; + $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./check_snmp -H 127.0.0.1 -C public 
-p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" ); + is($res->return_code, 0, "OK for first call" ); + is($res->output, "No previous data to calculate rate - assume okay" ); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP RATE OK - inoctets 666 | inoctets=666 ", "Check label"); + # test rate 1 second later + $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP RATE OK - inoctets 666 | inoctets=666 ", "Check label"); -sleep 2; + # test rate 3 seconds later + $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+3))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP RATE OK - inoctets 333 | inoctets=333 ", "Check rate decreases due to longer interval"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP RATE OK - inoctets 333 | inoctets=333 ", "Check rate decreases due to longer interval"); + # label performance data check + $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l test" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP OK - test 67996 | test=67996c ", "Check label"); -# label performance data check -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l test" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP OK - test 67996 | test=67996c ", "Check label"); + $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l \"test'test\"" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP OK - test'test 68662 | \"test'test\"=68662c ", "Check label"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l \"test'test\"" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP OK - test'test 68662 | \"test'test\"=68662c ", "Check label"); + $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l 'test\"test'" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP OK - test\"test 69328 | 'test\"test'=69328c ", "Check label"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l 'test\"test'" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP OK - test\"test 69328 | 'test\"test'=69328c ", "Check label"); + $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l test -O" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP OK - test 69994 | iso.3.6.1.4.1.8072.3.2.67.10=69994c ", "Check label"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l test 
-O" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP OK - test 69994 | iso.3.6.1.4.1.8072.3.2.67.10=69994c ", "Check label"); + $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP OK - 70660 | iso.3.6.1.4.1.8072.3.2.67.10=70660c ", "Check label"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP OK - 70660 | iso.3.6.1.4.1.8072.3.2.67.10=70660c ", "Check label"); + $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l 'test test'" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP OK - test test 71326 | 'test test'=71326c ", "Check label"); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l 'test test'" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP OK - test test 71326 | 'test test'=71326c ", "Check label"); + $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets_per_minute --rate-multiplier=60" ); + is($res->return_code, 0, "OK for first call" ); + is($res->output, "No previous data to calculate rate - assume okay" ); -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets_per_minute --rate-multiplier=60" ); -is($res->return_code, 0, "OK for first call" ); -is($res->output, "No previous data to calculate rate - assume okay" ); - -# Need to sleep, otherwise duration=0 -sleep 1; + # test 1 second later + $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets_per_minute --rate-multiplier=60" ); + is($res->return_code, 0, "OK as no thresholds" ); + is($res->output, "SNMP RATE OK - inoctets_per_minute 39960 | inoctets_per_minute=39960 ", "Checking multiplier"); +}; -$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets_per_minute --rate-multiplier=60" ); -is($res->return_code, 0, "OK as no thresholds" ); -is($res->output, "SNMP RATE OK - inoctets_per_minute 39960 | inoctets_per_minute=39960 ", "Checking multiplier"); $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.11 -s '\"stringtests\"'" ); -- cgit v0.10-9-g596f From 0ec21334c3add0e3e7d345da11ce438797bf40df Mon Sep 17 00:00:00 2001 From: Jan Wagner Date: Sat, 2 Feb 2019 16:36:47 +0100 Subject: travis-ci: Switch over to xenial diff --git a/.travis.yml b/.travis.yml index 712f247..dce12f5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,5 @@ sudo: required -dist: trusty +dist: xenial language: c env: -- cgit v0.10-9-g596f From a8d96ce2c6bd5ed10b77ac0fe4cb6979c7027e19 Mon Sep 17 00:00:00 2001 From: Jan Wagner Date: Sat, 2 Feb 2019 16:47:07 +0100 Subject: travis-ci: Remove backports ppa diff --git a/.travis.yml b/.travis.yml index dce12f5..d7716fa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -39,7 +39,6 @@ before_install: - "sudo killall -9 ntpd ||:" # Trusty has no swap, lets create 
some - sudo fallocate -l 20M /swapfile; sudo chmod 600 /swapfile; sudo mkswap /swapfile; sudo swapon /swapfile - - sudo add-apt-repository -y ppa:waja/trusty-backports - sudo apt-get update -qq - sudo apt-get purge -qq gawk -- cgit v0.10-9-g596f From 0ca6c28ebfcf647b72ec2f62a75ef846aad58754 Mon Sep 17 00:00:00 2001 From: Jan Wagner Date: Sat, 2 Feb 2019 16:52:08 +0100 Subject: travis-ci: The package name is now 'squid' diff --git a/.travis.yml b/.travis.yml index d7716fa..43e8ee8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -51,7 +51,7 @@ install: - sudo apt-get install -qq --no-install-recommends autoconf automake - sudo apt-get install -qq --no-install-recommends faketime - sudo apt-get install -qq --no-install-recommends libmonitoring-plugin-perl - - sudo apt-get install -qq --no-install-recommends squid3 + - sudo apt-get install -qq --no-install-recommends squid # Trusty related dependencies (not yet provided) - test "$(dpkg -l | grep -E "mysql-(client|server)-[0-9].[0-9]" | grep -c ^ii)" -gt 0 || sudo apt-get install -qq --no-install-recommends mariadb-client mariadb-server # enable ssl apache @@ -59,8 +59,8 @@ install: - sudo a2ensite default-ssl - sudo make-ssl-cert generate-default-snakeoil --force-overwrite - sudo service apache2 reload - - sudo cp tools/squid.conf /etc/squid3/squid.conf - - sudo service squid3 reload + - sudo cp tools/squid.conf /etc/squid/squid.conf + - sudo service squid reload before_script: # ensure we have a test database in place for tests -- cgit v0.10-9-g596f From 2416aaadd879caae33438e09f6d2eeffdb5434c2 Mon Sep 17 00:00:00 2001 From: Jan Wagner Date: Sat, 2 Feb 2019 17:26:01 +0100 Subject: travis-ci: Restart mysql diff --git a/.travis.yml b/.travis.yml index 43e8ee8..134d218 100644 --- a/.travis.yml +++ b/.travis.yml @@ -61,6 +61,7 @@ install: - sudo service apache2 reload - sudo cp tools/squid.conf /etc/squid/squid.conf - sudo service squid reload + - sudo service mysql restart before_script: # ensure we have a test database in place for tests -- cgit v0.10-9-g596f From 2dcfbbcad03baf1124644184ec072ac0dba024f7 Mon Sep 17 00:00:00 2001 From: Jan Wagner Date: Mon, 4 Feb 2019 14:24:29 +0100 Subject: travis-ci: Use RSA keys for SSH tests diff --git a/.travis.yml b/.travis.yml index 134d218..946345c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -73,8 +73,8 @@ before_script: - make - export NPTEST_ACCEPTDEFAULT=1 - export NPTEST_CACHE="$(pwd)/plugins/t/NPTest.cache.travis" - - ssh-keygen -t dsa -N "" -f ~/.ssh/id_dsa - - cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys + - ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa + - cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys - ssh-keyscan localhost >> ~/.ssh/known_hosts - touch ~/.ssh/config - sudo rm -f /usr/share/mibs/ietf/SNMPv2-PDU /usr/share/mibs/ietf/IPSEC-SPD-MIB /usr/share/mibs/ietf/IPATM-IPMC-MIB /usr/share/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB diff --git a/plugins/t/NPTest.cache.travis b/plugins/t/NPTest.cache.travis index 6ee4505..9b9f805 100644 --- a/plugins/t/NPTest.cache.travis +++ b/plugins/t/NPTest.cache.travis @@ -50,5 +50,5 @@ 'NP_SNMP_USER' => '', 'NP_SSH_CONFIGFILE' => '~/.ssh/config', 'NP_SSH_HOST' => 'localhost', - 'NP_SSH_IDENTITY' => '~/.ssh/id_dsa' + 'NP_SSH_IDENTITY' => '~/.ssh/id_rsa' } -- cgit v0.10-9-g596f From f7f0f7d4671300e54e5d70278fa419c2782d9ef6 Mon Sep 17 00:00:00 2001 From: Tobias Wolf Date: Tue, 12 Feb 2019 13:01:23 +0100 Subject: Also support the --show-body/-B flag when --expect is used diff --git a/plugins/check_http.c b/plugins/check_http.c index 856e1e9..de59a06 100644 --- 
a/plugins/check_http.c +++ b/plugins/check_http.c @@ -1155,6 +1155,8 @@ check_http (void) xasprintf (&msg, _("Invalid HTTP response received from host on port %d: %s\n"), server_port, status_line); + if (show_body) + xasprintf (&msg, _("%s\n%s"), msg, page); die (STATE_CRITICAL, "HTTP CRITICAL - %s", msg); } -- cgit v0.10-9-g596f From 7cafb0e84550035fe671662c293122be975065ca Mon Sep 17 00:00:00 2001 From: Sven Nierlein Date: Fri, 15 Feb 2019 10:36:28 +0100 Subject: check_by_ssh: fix child process leak on timeouts When check_by_ssh runs into a timeout, it simply exits and leaves all child processes running. Adopting the kill loop from runcmd_timeout_alarm_handler() fixes this. Signed-off-by: Sven Nierlein diff --git a/lib/utils_base.c b/lib/utils_base.c index 19a531f..fd7058d 100644 --- a/lib/utils_base.c +++ b/lib/utils_base.c @@ -37,6 +37,9 @@ monitoring_plugin *this_monitoring_plugin=NULL; +unsigned int timeout_state = STATE_CRITICAL; +unsigned int timeout_interval = DEFAULT_SOCKET_TIMEOUT; + int _np_state_read_file(FILE *); void np_init( char *plugin_name, int argc, char **argv ) { @@ -359,6 +362,22 @@ char *np_extract_value(const char *varlist, const char *name, char sep) { return value; } +const char * +state_text (int result) +{ + switch (result) { + case STATE_OK: + return "OK"; + case STATE_WARNING: + return "WARNING"; + case STATE_CRITICAL: + return "CRITICAL"; + case STATE_DEPENDENT: + return "DEPENDENT"; + default: + return "UNKNOWN"; + } +} /* * Read a string representing a state (ok, warning... or numeric: 0, 1) and diff --git a/lib/utils_base.h b/lib/utils_base.h index 42ae0c0..d7e7dff 100644 --- a/lib/utils_base.h +++ b/lib/utils_base.h @@ -61,6 +61,10 @@ void print_thresholds(const char *, thresholds *); int check_range(double, range *); int get_status(double, thresholds *); +/* Handle timeouts */ +extern unsigned int timeout_state; +extern unsigned int timeout_interval; + /* All possible characters in a threshold range */ #define NP_THRESHOLDS_CHARS "-0123456789.:@~" @@ -107,5 +111,6 @@ void np_state_write_string(time_t, char *); void np_init(char *, int argc, char **argv); void np_set_args(int argc, char **argv); void np_cleanup(); +const char *state_text (int); #endif /* _UTILS_BASE_ */ diff --git a/lib/utils_cmd.c b/lib/utils_cmd.c index 7eb9a3a..795840d 100644 --- a/lib/utils_cmd.c +++ b/lib/utils_cmd.c @@ -40,6 +40,7 @@ /** includes **/ #include "common.h" +#include "utils.h" #include "utils_cmd.h" #include "utils_base.h" #include @@ -65,31 +66,6 @@ extern char **environ; # define SIG_ERR ((Sigfunc *)-1) #endif -/* This variable must be global, since there's no way the caller - * can forcibly slay a dead or ungainly running program otherwise. - * Multithreading apps and plugins can initialize it (via CMD_INIT) - * in an async safe manner PRIOR to calling cmd_run() or cmd_run_array() - * for the first time. - * - * The check for initialized values is atomic and can - * occur in any number of threads simultaneously. */ -static pid_t *_cmd_pids = NULL; - -/* Try sysconf(_SC_OPEN_MAX) first, as it can be higher than OPEN_MAX. - * If that fails and the macro isn't defined, we fall back to an educated - * guess. There's no guarantee that our guess is adequate and the program - * will die with SIGSEGV if it isn't and the upper boundary is breached. 
*/ -#define DEFAULT_MAXFD 256 /* fallback value if no max open files value is set */ -#define MAXFD_LIMIT 8192 /* upper limit of open files */ -#ifdef _SC_OPEN_MAX -static long maxfd = 0; -#elif defined(OPEN_MAX) -# define maxfd OPEN_MAX -#else /* sysconf macro unavailable, so guess (may be wildly inaccurate) */ -# define maxfd DEFAULT_MAXFD -#endif - - /** prototypes **/ static int _cmd_open (char *const *, int *, int *) __attribute__ ((__nonnull__ (1, 2, 3))); @@ -406,3 +382,19 @@ cmd_file_read ( char *filename, output *out, int flags) return 0; } + +void +timeout_alarm_handler (int signo) +{ + size_t i; + if (signo == SIGALRM) { + printf (_("%s - Plugin timed out after %d seconds\n"), + state_text(timeout_state), timeout_interval); + + if(_cmd_pids) for(i = 0; i < maxfd; i++) { + if(_cmd_pids[i] != 0) kill(_cmd_pids[i], SIGKILL); + } + + exit (timeout_state); + } +} diff --git a/lib/utils_cmd.h b/lib/utils_cmd.h index ebaf15b..6f3aeb8 100644 --- a/lib/utils_cmd.h +++ b/lib/utils_cmd.h @@ -32,4 +32,17 @@ void cmd_init (void); #define CMD_NO_ARRAYS 0x01 /* don't populate arrays at all */ #define CMD_NO_ASSOC 0x02 /* output.line won't point to buf */ +/* This variable must be global, since there's no way the caller + * can forcibly slay a dead or ungainly running program otherwise. + * Multithreading apps and plugins can initialize it (via CMD_INIT) + * in an async safe manner PRIOR to calling cmd_run() or cmd_run_array() + * for the first time. + * + * The check for initialized values is atomic and can + * occur in any number of threads simultaneously. */ +static pid_t *_cmd_pids = NULL; + +RETSIGTYPE timeout_alarm_handler (int); + + #endif /* _UTILS_CMD_ */ diff --git a/plugins/check_dbi.c b/plugins/check_dbi.c index 826eb8d..ced13d0 100644 --- a/plugins/check_dbi.c +++ b/plugins/check_dbi.c @@ -35,6 +35,7 @@ const char *email = "devel@monitoring-plugins.org"; #include "common.h" #include "utils.h" +#include "utils_cmd.h" #include "netutils.h" diff --git a/plugins/check_pgsql.c b/plugins/check_pgsql.c index 5cd4709..11ce691 100644 --- a/plugins/check_pgsql.c +++ b/plugins/check_pgsql.c @@ -34,6 +34,7 @@ const char *email = "devel@monitoring-plugins.org"; #include "common.h" #include "utils.h" +#include "utils_cmd.h" #include "netutils.h" #include diff --git a/plugins/common.h b/plugins/common.h index 6bf4fca..0f08e2f 100644 --- a/plugins/common.h +++ b/plugins/common.h @@ -225,4 +225,18 @@ enum { # define __attribute__(x) /* do nothing */ #endif +/* Try sysconf(_SC_OPEN_MAX) first, as it can be higher than OPEN_MAX. + * If that fails and the macro isn't defined, we fall back to an educated + * guess. There's no guarantee that our guess is adequate and the program + * will die with SIGSEGV if it isn't and the upper boundary is breached. */ +#define DEFAULT_MAXFD 256 /* fallback value if no max open files value is set */ +#define MAXFD_LIMIT 8192 /* upper limit of open files */ +#ifdef _SC_OPEN_MAX +static long maxfd = 0; +#elif defined(OPEN_MAX) +# define maxfd OPEN_MAX +#else /* sysconf macro unavailable, so guess (may be wildly inaccurate) */ +# define maxfd DEFAULT_MAXFD +#endif + #endif /* _COMMON_H_ */ diff --git a/plugins/popen.c b/plugins/popen.c index 592263f..116d168 100644 --- a/plugins/popen.c +++ b/plugins/popen.c @@ -78,7 +78,6 @@ RETSIGTYPE popen_timeout_alarm_handler (int); #define min(a,b) ((a) < (b) ? (a) : (b)) #define max(a,b) ((a) > (b) ? (a) : (b)) -int open_max (void); /* {Prog openmax} */ static void err_sys (const char *, ...) 
__attribute__((noreturn,format(printf, 1, 2))); char *rtrim (char *, const char *); @@ -86,7 +85,6 @@ char *pname = NULL; /* caller can set this from argv[0] */ /*int *childerr = NULL;*//* ptr to array allocated at run-time */ /*extern pid_t *childpid = NULL; *//* ptr to array allocated at run-time */ -static int maxfd; /* from our open_max(), {Prog openmax} */ #ifdef REDHAT_SPOPEN_ERROR static volatile int childtermd = 0; @@ -187,13 +185,11 @@ spopen (const char *cmdstring) argv[i] = NULL; if (childpid == NULL) { /* first time through */ - maxfd = open_max (); /* allocate zeroed out array for child pids */ if ((childpid = calloc ((size_t)maxfd, sizeof (pid_t))) == NULL) return (NULL); } if (child_stderr_array == NULL) { /* first time through */ - maxfd = open_max (); /* allocate zeroed out array for child pids */ if ((child_stderr_array = calloc ((size_t)maxfd, sizeof (int))) == NULL) return (NULL); } @@ -273,15 +269,6 @@ spclose (FILE * fp) return (1); } -#ifdef OPEN_MAX -static int openmax = OPEN_MAX; -#else -static int openmax = 0; -#endif - -#define OPEN_MAX_GUESS 256 /* if OPEN_MAX is indeterminate */ - /* no guarantee this is adequate */ - #ifdef REDHAT_SPOPEN_ERROR RETSIGTYPE popen_sigchld_handler (int signo) @@ -311,22 +298,6 @@ popen_timeout_alarm_handler (int signo) } -int -open_max (void) -{ - if (openmax == 0) { /* first time through */ - errno = 0; - if ((openmax = sysconf (_SC_OPEN_MAX)) < 0) { - if (errno == 0) - openmax = OPEN_MAX_GUESS; /* it's indeterminate */ - else - err_sys (_("sysconf error for _SC_OPEN_MAX")); - } - } - return (openmax); -} - - /* Fatal error related to a system call. * Print a message and die. */ diff --git a/plugins/runcmd.c b/plugins/runcmd.c index 1a7c904..c382867 100644 --- a/plugins/runcmd.c +++ b/plugins/runcmd.c @@ -67,19 +67,6 @@ * occur in any number of threads simultaneously. */ static pid_t *np_pids = NULL; -/* Try sysconf(_SC_OPEN_MAX) first, as it can be higher than OPEN_MAX. - * If that fails and the macro isn't defined, we fall back to an educated - * guess. There's no guarantee that our guess is adequate and the program - * will die with SIGSEGV if it isn't and the upper boundary is breached. 
*/ -#ifdef _SC_OPEN_MAX -static long maxfd = 0; -#elif defined(OPEN_MAX) -# define maxfd OPEN_MAX -#else /* sysconf macro unavailable, so guess (may be wildly inaccurate) */ -# define maxfd 256 -#endif - - /** prototypes **/ static int np_runcmd_open(const char *, int *, int *) __attribute__((__nonnull__(1, 2, 3))); diff --git a/plugins/utils.c b/plugins/utils.c index 231af92..ee62013 100644 --- a/plugins/utils.c +++ b/plugins/utils.c @@ -36,9 +36,6 @@ extern const char *progname; #define STRLEN 64 #define TXTBLK 128 -unsigned int timeout_state = STATE_CRITICAL; -unsigned int timeout_interval = DEFAULT_SOCKET_TIMEOUT; - time_t start_time, end_time; /* ************************************************************************** @@ -148,33 +145,6 @@ print_revision (const char *command_name, const char *revision) command_name, revision, PACKAGE, VERSION); } -const char * -state_text (int result) -{ - switch (result) { - case STATE_OK: - return "OK"; - case STATE_WARNING: - return "WARNING"; - case STATE_CRITICAL: - return "CRITICAL"; - case STATE_DEPENDENT: - return "DEPENDENT"; - default: - return "UNKNOWN"; - } -} - -void -timeout_alarm_handler (int signo) -{ - if (signo == SIGALRM) { - printf (_("%s - Plugin timed out after %d seconds\n"), - state_text(timeout_state), timeout_interval); - exit (timeout_state); - } -} - int is_numeric (char *number) { @@ -708,4 +678,3 @@ char *sperfdata_int (const char *label, return data; } - diff --git a/plugins/utils.h b/plugins/utils.h index a436e1c..6aa316f 100644 --- a/plugins/utils.h +++ b/plugins/utils.h @@ -29,13 +29,6 @@ suite of plugins. */ void support (void); void print_revision (const char *, const char *); -/* Handle timeouts */ - -extern unsigned int timeout_state; -extern unsigned int timeout_interval; - -RETSIGTYPE timeout_alarm_handler (int); - extern time_t start_time, end_time; /* Test input types */ @@ -89,8 +82,6 @@ void usage4(const char *) __attribute__((noreturn)); void usage5(void) __attribute__((noreturn)); void usage_va(const char *fmt, ...) __attribute__((noreturn)); -const char *state_text (int); - #define max(a,b) (((a)>(b))?(a):(b)) #define min(a,b) (((a)<(b))?(a):(b)) -- cgit v0.10-9-g596f From 763eb740b3240458cbb9ed6c6356bfc5b9a18e71 Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Fri, 15 Feb 2019 17:31:29 +0100 Subject: check_dns: fix typo in parameter description diff --git a/plugins/check_dns.c b/plugins/check_dns.c index 25bd31d..d4d0b88 100644 --- a/plugins/check_dns.c +++ b/plugins/check_dns.c @@ -554,7 +554,7 @@ print_help (void) printf (" -c, --critical=seconds\n"); printf (" %s\n", _("Return critical if elapsed time exceeds value. Default off")); printf (" -L, --all\n"); - printf (" %s\n", _("Return critical the list of expected addresses does not match all addresses")); + printf (" %s\n", _("Return critical if the list of expected addresses does not match all addresses")); printf (" %s\n", _("returned. 
Default off")); printf (UT_CONN_TIMEOUT, DEFAULT_SOCKET_TIMEOUT); -- cgit v0.10-9-g596f From 8442ed5b41a68f6623492235b132c21735d51897 Mon Sep 17 00:00:00 2001 From: Sven Nierlein Date: Tue, 19 Feb 2019 16:35:28 +0100 Subject: renew test certificates Signed-off-by: Sven Nierlein diff --git a/plugins/tests/certs/server-cert.pem b/plugins/tests/certs/server-cert.pem index 549e4f7..b84b91d 100644 --- a/plugins/tests/certs/server-cert.pem +++ b/plugins/tests/certs/server-cert.pem @@ -1,21 +1,24 @@ -----BEGIN CERTIFICATE----- -MIIDYzCCAsygAwIBAgIJAL8LkpNwzYdxMA0GCSqGSIb3DQEBBAUAMH8xCzAJBgNV -BAYTAlVLMRMwEQYDVQQIEwpEZXJieXNoaXJlMQ8wDQYDVQQHEwZCZWxwZXIxFzAV -BgNVBAoTDk5hZ2lvcyBQbHVnaW5zMREwDwYDVQQDEwhUb24gVm9vbjEeMBwGCSqG -SIb3DQEJARYPdG9udm9vbkBtYWMuY29tMB4XDTA5MDMwNTIxNDEyOFoXDTE5MDMw -MzIxNDEyOFowfzELMAkGA1UEBhMCVUsxEzARBgNVBAgTCkRlcmJ5c2hpcmUxDzAN -BgNVBAcTBkJlbHBlcjEXMBUGA1UEChMOTmFnaW9zIFBsdWdpbnMxETAPBgNVBAMT -CFRvbiBWb29uMR4wHAYJKoZIhvcNAQkBFg90b252b29uQG1hYy5jb20wgZ8wDQYJ -KoZIhvcNAQEBBQADgY0AMIGJAoGBAKcWMBtNtfY8vZXk0SN6/EYTVN/LOvaOSegy -oVdLoGwuwjagk+XmCzvCqHZRp8lnCLay7AO8AQI7TSN02ihCcSrgGA9OT+HciIJ1 -l5/kEYUAuA1PR6YKK/T713zUAlMzy2tsugx5+xSsSEwsXkmne52jJiG/wuE5CLT0 -9pF8HQqHAgMBAAGjgeYwgeMwHQYDVR0OBBYEFGioSPQ/rdE19+zaeY2YvHTXlUDI -MIGzBgNVHSMEgaswgaiAFGioSPQ/rdE19+zaeY2YvHTXlUDIoYGEpIGBMH8xCzAJ -BgNVBAYTAlVLMRMwEQYDVQQIEwpEZXJieXNoaXJlMQ8wDQYDVQQHEwZCZWxwZXIx -FzAVBgNVBAoTDk5hZ2lvcyBQbHVnaW5zMREwDwYDVQQDEwhUb24gVm9vbjEeMBwG -CSqGSIb3DQEJARYPdG9udm9vbkBtYWMuY29tggkAvwuSk3DNh3EwDAYDVR0TBAUw -AwEB/zANBgkqhkiG9w0BAQQFAAOBgQCdqasaIO6JiV5ONFG6Tr1++85UfEdZKMUX -N2NHiNNUunolIZEYR+dW99ezKmHlDiQ/tMgoLVYpl2Ubho2pAkLGQR+W0ZASgWQ1 -NjfV27Rv0y6lYQMTA0lVAU93L1x9reo3FMedmL5+H+lIEpLCxEPtAJNISrJOneZB -W5jDadwkoQ== +MIIEBjCCAu6gAwIBAgIJANbQ5QQrKhUGMA0GCSqGSIb3DQEBCwUAMIGXMQswCQYD +VQQGEwJERTEQMA4GA1UECAwHQmF2YXJpYTEPMA0GA1UEBwwGTXVuaWNoMRswGQYD +VQQKDBJNb25pdG9yaW5nIFBsdWdpbnMxGzAZBgNVBAMMEk1vbml0b3JpbmcgUGx1 +Z2luczErMCkGCSqGSIb3DQEJARYcZGV2ZWxAbW9uaXRvcmluZy1wbHVnaW5zLm9y +ZzAeFw0xOTAyMTkxNTMxNDRaFw0yOTAyMTYxNTMxNDRaMIGXMQswCQYDVQQGEwJE +RTEQMA4GA1UECAwHQmF2YXJpYTEPMA0GA1UEBwwGTXVuaWNoMRswGQYDVQQKDBJN +b25pdG9yaW5nIFBsdWdpbnMxGzAZBgNVBAMMEk1vbml0b3JpbmcgUGx1Z2luczEr +MCkGCSqGSIb3DQEJARYcZGV2ZWxAbW9uaXRvcmluZy1wbHVnaW5zLm9yZzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKgV2yp8pQvJuN+aJGdAe6Hd0tja +uteCPcNIcM92WLOF69TLTSYon1XDon4tHTh4Z5d4lD8bfsGzFVBmDSgWidhAUf+v +EqEXwbp293ej/Frc0pXCvmrz6kI1tWrLtQhL/VdbxFYxhV7JjKb+PY3SxGFpSLPe +PQ/5SwVndv7rZIwcjseL22K5Uy2TIrkgzzm2pRs/IvoxRybYr/+LGoHyrtJC6AO8 +ylp8A/etL0gwtUvRnrnZeTQ2pA1uZ5QN3anTL8JP/ZRZYNegIkaawqMtTKbhM6pi +u3/4a3Uppvt0y7vmGfQlYejxCpICnMrvHMpw8L58zv/98AbCGjDU3UwCt6MCAwEA +AaNTMFEwHQYDVR0OBBYEFG/UH6nGYPlVcM75UXzXBF5GZyrcMB8GA1UdIwQYMBaA +FG/UH6nGYPlVcM75UXzXBF5GZyrcMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN +AQELBQADggEBAGwitJPOnlIKLndNf+iCLMIs0dxsl8kAaejFcjoT0n4ja7Y6Zrqz +VSIidzz9vQWvy24xKJpAOdj/iLRHCUOG+Pf5fA6+/FiuqXr6gE2/lm0eC58BNONr +E5OzjQ/VoQ8RX4hDntgu6FYbaVa/vhwn16igt9qmdNGGZXf2/+DM3JADwyaA4EK8 +vm7KdofX9zkxXecHPNvf3jiVLPiDDt6tkGpHPEsyP/yc+RUdltUeZvHfliV0cCuC +jJX+Fm9ysjSpHIFFr+jUMuMHibWoOD8iy3eYxfCDoWsH488pCbj8MNuAq6vd6DBk +bOZxDz43vjWuYMkwXJTxJQh7Pne6kK0vE1g= -----END CERTIFICATE----- diff --git a/plugins/tests/certs/server-key.pem b/plugins/tests/certs/server-key.pem index eacaeaa..1194755 100644 --- a/plugins/tests/certs/server-key.pem +++ b/plugins/tests/certs/server-key.pem @@ -1,15 +1,28 @@ ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQCnFjAbTbX2PL2V5NEjevxGE1Tfyzr2jknoMqFXS6BsLsI2oJPl 
-5gs7wqh2UafJZwi2suwDvAECO00jdNooQnEq4BgPTk/h3IiCdZef5BGFALgNT0em -Civ0+9d81AJTM8trbLoMefsUrEhMLF5Jp3udoyYhv8LhOQi09PaRfB0KhwIDAQAB -AoGAfpxclcP8N3vteXErXURrd7pcXT0GECDgNjhvc9PV20RPXM+vYs1AA+fMeeQE -TaRqwO6x016aMRO4rz5ztYArecTBznkds1k59pkN/Ne/nsueU4tvGK8MNyS2o986 -Voohqkaq4Lcy1bcHJb9su1ELjegEr1R76Mz452Hsy+uTbAECQQDcg/tZWKVeh5CQ -dOEB3YWHwfn0NDgfPm/X2i2kAZ7n7URaUy/ffdlfsrr1mBtHCfedLoOxmmlNfEpM -hXAAurSHAkEAwfk7fEb0iN0Sj9gTozO7c6Ky10KwePZyjVzqSQIiJq3NX8BEaIeb -51TXxE5VxaLjjMLRkA0hWTYXClgERFZ6AQJAN7ChPqwzf08PRFwwIw911JY5cOHr -NoDHMCUql5vNLNdwBruxgGjBB/kUXEfgw60RusFvgt/zLh1wiii844JDawJAGQBF -sYP3urg7zzx7c3qUe5gJ0wLuefjR1PSX4ecbfb7DDMdcSdjIuG1QDiZGmd2f1KG7 -nwSCOtxk5dloW2KGAQJAQh/iBn0QhfKLFAP5eZBVk8E8XlZuw+S2DLy5SnBlIiYJ -GB5I2OClgtudXMv1labFrcST8O9eFrtsrhU1iUGUOw== ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCoFdsqfKULybjf +miRnQHuh3dLY2rrXgj3DSHDPdlizhevUy00mKJ9Vw6J+LR04eGeXeJQ/G37BsxVQ +Zg0oFonYQFH/rxKhF8G6dvd3o/xa3NKVwr5q8+pCNbVqy7UIS/1XW8RWMYVeyYym +/j2N0sRhaUiz3j0P+UsFZ3b+62SMHI7Hi9tiuVMtkyK5IM85tqUbPyL6MUcm2K// +ixqB8q7SQugDvMpafAP3rS9IMLVL0Z652Xk0NqQNbmeUDd2p0y/CT/2UWWDXoCJG +msKjLUym4TOqYrt/+Gt1Kab7dMu75hn0JWHo8QqSApzK7xzKcPC+fM7//fAGwhow +1N1MArejAgMBAAECggEANuvdTwanTzC8jaNqHaq+OuemS2E9B8nwsGxtH/zFgvNR +WZiMPtmrJnTkFWJcV+VPw/iMSAqN4nDHmBugVOb4Z4asxGTKK4T9shXJSnh0rqPU +00ZsvbmxY6z0+E5TesCJqQ+9GYTY1V357V7JchvaOxIRxWPqg9urHbru8OCtW/I5 +Fh5HPUZlgCvlMpjlhyjydIf/oXyVA3RNsXlwe8+2cKuGIrjEzm2j9o3VF0sctTX0 +ItP8A9qDmDQN7GIWX0MW6gncojpS1omC2wcFsdjj/xfPyiDal1X4aq/2YqG8351c +YlM/+6Va0u9WWE/i64gASTAVqpMV4Yg8y0gGycuA0QKBgQDbgI2QeLd3FvMcURiU +l3w9qJgw/Jp3jaNC/9LkVGGz4f4lKKB67lPZvI4noMK8GqO/LcXgqP/RY1oJojoA +/6JKVvzYGASZ7VgMoG9bk1AneP1PGdibuTUEwimGlcObxnDFIC/yjwPFu3jIdqdS +zZi1RZzyqAogN5y3SBEypSmn9wKBgQDECKsqqlcizmCl8v5aVk875AzGN+DOHZqx +bkmztlnLO/2e2Fmk3G5Vvnui0FYisf8Eq19tUTQCF6lSfJlGQeFAT119wkFZhLu+ +FfLGqoEMH0ijJg/8PpdpFRK3I94YcISoTNN6yxMvE6xdDGfKCt5a+IX5bwQi9Zdc +B242gEc6tQKBgA6tM8n7KFlAIZU9HuWgk2AUC8kKutFPmSD7tgAqXDYI4FNfugs+ +MEEYyHCB4UNujJBV4Ss6YZCAkh6eyD4U2aca1eElCfm40vBVMdzvpqZdAqLtWXxg +D9l3mgszrFaYGCY2Fr6jLV9lP5g3xsxUjudf9jSLY9HvpfzjRrMaNATVAoGBALTl +/vYfPMucwKlC5B7++J0e4/7iv6vUu9SyHocdZh1anb9AjPDKjXLIlZT4RhQ8R0XK +0wOw5JpttU2uN08TKkbLNk3/vYhbKVjPLjrQSseh8sjDLgsqw1QwIxYnniLVakVY +p+rvjSNrNyqicQCMKQavwgocvSd5lJRTMwxOMezlAoGBAKWj71BX+0CK00/2S6lC +TcNcuUPG0d8y1czZ4q6tUlG4htwq1FMOpaghATXjkdsOGTLS+H1aA0Kt7Ai9zDhc +/bzOJEJ+jvBXV4Gcs7jl1r/HTKv0tT9ZSI5Vzkida0rfqxDGzcMVlLuCdH0cb8Iu +N0wdmCAqlQwHR13+F1zrAD7V +-----END PRIVATE KEY----- diff --git a/plugins/tests/check_http.t b/plugins/tests/check_http.t index d6d31de..006f133 100755 --- a/plugins/tests/check_http.t +++ b/plugins/tests/check_http.t @@ -4,13 +4,13 @@ # # To create the https server certificate: # openssl req -new -x509 -keyout server-key.pem -out server-cert.pem -days 3650 -nodes -# Country Name (2 letter code) [AU]:UK -# State or Province Name (full name) [Some-State]:Derbyshire -# Locality Name (eg, city) []:Belper +# Country Name (2 letter code) [AU]:DE +# State or Province Name (full name) [Some-State]:Bavaria +# Locality Name (eg, city) []:Munich # Organization Name (eg, company) [Internet Widgits Pty Ltd]:Monitoring Plugins # Organizational Unit Name (eg, section) []: -# Common Name (eg, YOUR name) []:Ton Voon -# Email Address []:tonvoon@mac.com +# Common Name (e.g. 
server FQDN or YOUR name) []:Monitoring Plugins +# Email Address []:devel@monitoring-plugins.org use strict; use Test::More; @@ -194,16 +194,16 @@ SKIP: { $result = NPTest->testCmd( "$command -p $port_https -S -C 14" ); is( $result->return_code, 0, "$command -p $port_https -S -C 14" ); - is( $result->output, 'OK - Certificate \'Ton Voon\' will expire on Sun Mar 3 21:41:28 2019 +0000.', "output ok" ); + is( $result->output, "OK - Certificate 'Monitoring Plugins' will expire on Fri Feb 16 15:31:44 2029 +0000.", "output ok" ); $result = NPTest->testCmd( "$command -p $port_https -S -C 14000" ); is( $result->return_code, 1, "$command -p $port_https -S -C 14000" ); - like( $result->output, '/WARNING - Certificate \'Ton Voon\' expires in \d+ day\(s\) \(Sun Mar 3 21:41:28 2019 \+0000\)./', "output ok" ); + like( $result->output, '/WARNING - Certificate \'Monitoring Plugins\' expires in \d+ day\(s\) \(Fri Feb 16 15:31:44 2029 \+0000\)./', "output ok" ); # Expired cert tests $result = NPTest->testCmd( "$command -p $port_https -S -C 13960,14000" ); is( $result->return_code, 2, "$command -p $port_https -S -C 13960,14000" ); - like( $result->output, '/CRITICAL - Certificate \'Ton Voon\' expires in \d+ day\(s\) \(Sun Mar 3 21:41:28 2019 \+0000\)./', "output ok" ); + like( $result->output, '/CRITICAL - Certificate \'Monitoring Plugins\' expires in \d+ day\(s\) \(Fri Feb 16 15:31:44 2029 \+0000\)./', "output ok" ); $result = NPTest->testCmd( "$command -p $port_https_expired -S -C 7" ); is( $result->return_code, 2, "$command -p $port_https_expired -S -C 7" ); -- cgit v0.10-9-g596f From 2bc4cc99d2c3fdbbd06af862cb0ae1098895f860 Mon Sep 17 00:00:00 2001 From: Robin Sonefors Date: Fri, 10 May 2013 17:21:34 +0200 Subject: check_mysql: Allow sockets to be specified to -H The help text says that -H accepts a "unix socket (must be an absolute path)". Now that actually corresponds to reality. Signed-off-by: Robin Sonefors diff --git a/plugins/check_mysql.c b/plugins/check_mysql.c index 5773afd..0cba50e 100644 --- a/plugins/check_mysql.c +++ b/plugins/check_mysql.c @@ -379,6 +379,9 @@ process_arguments (int argc, char **argv) if (is_host (optarg)) { db_host = optarg; } + else if (*optarg == '/') { + db_socket = optarg; + } else { usage2 (_("Invalid hostname/address"), optarg); } -- cgit v0.10-9-g596f From a58ea5ec5d5fecce9224d10b7058ac63050db05a Mon Sep 17 00:00:00 2001 From: ChrisWi Date: Thu, 21 Mar 2019 14:18:37 +0100 Subject: improve command examples for 'at least' processes diff --git a/plugins/check_procs.c b/plugins/check_procs.c index 4bcc56b..f7917c3 100644 --- a/plugins/check_procs.c +++ b/plugins/check_procs.c @@ -764,6 +764,11 @@ be the total number of running processes\n\n")); printf (" %s\n", "check_procs -w 2:2 -c 2:1024 -C portsentry"); printf (" %s\n", _("Warning if not two processes with command name portsentry.")); printf (" %s\n\n", _("Critical if < 2 or > 1024 processes")); + printf (" %s\n", "check_procs -c 1: -C sshd"); + printf (" %s\n", _("Critical if not at least 1 process with command sshd")); + printf (" %s\n", "check_procs -w 1024 -c 1: -C sshd"); + printf (" %s\n", _("Warning if > 1024 processes with command name sshd.")); + printf (" %s\n\n", _("Critical if < 1 processes with command name sshd.")); printf (" %s\n", "check_procs -w 10 -a '/usr/local/bin/perl' -u root"); printf (" %s\n", _("Warning alert if > 10 processes with command arguments containing")); printf (" %s\n\n", _("'/usr/local/bin/perl' and owned by root")); -- cgit v0.10-9-g596f
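With the check_mysql patch above, an -H argument that starts with '/' is now stored as the path to a local unix socket instead of being rejected as an invalid hostname. A brief usage sketch follows; the host name and socket path are illustrative placeholders, not taken from the patch:

  # -H parsed as a hostname/address, as before
  check_mysql -H db1.example.com
  # absolute path: treated as the unix socket to connect to
  check_mysql -H /var/run/mysqld/mysqld.sock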
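The check_procs examples added above rely on the standard threshold range syntax: a bare number such as '-w 1024' alerts when the count rises above that maximum, while a minimum-only range such as '-c 1:' alerts when the count drops below it. A short sketch restating the two added invocations with that reading spelled out; sshd is simply the command name used in the patch:

  # CRITICAL unless at least one process with command name sshd is running
  check_procs -c 1: -C sshd
  # WARNING above 1024 sshd processes, CRITICAL if none remain
  check_procs -w 1024 -c 1: -C sshd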