"Malformed patch" error when applying a diff to the wget source

I wanted to apply this patch, so I copied the code starting at "Index: src/options.h" and ending at "+@item" and put it into a new file created in the wget source directory. Then I ran:

 $ patch -p0 < name_of_patch        (Patch is indented 1 space.)
 patching file src/options.h
 patch: **** malformed patch at line 6: char **excludes; /* List of excluded FTP directories. */

How should I apply it?

Here is the content of the file I created:

Index: src/options.h
===================================================================
--- src/options.h (revision 2276)
+++ src/options.h (working copy)
@@ -62,6 +62,8 @@
char **excludes; /* List of excluded FTP directories. */
char **includes; /* List of FTP directories to follow. */
+ int maxsize; /* Maximum file size (kB) */
+ int minsize; /* Minimum file size (kB) */
bool ignore_case; /* Whether to ignore case when matching dirs and files */
Index: src/init.c
===================================================================
--- src/init.c (revision 2276)
+++ src/init.c (working copy)
@@ -182,6 +182,8 @@
{ "loadcookies", &opt.cookies_input, cmd_file },
{ "logfile", &opt.lfilename, cmd_file },
{ "login", &opt.ftp_user, cmd_string },/* deprecated*/
+ { "maxsize", &opt.maxsize, cmd_number },
+ { "minsize", &opt.minsize, cmd_number },
{ "mirror", NULL, cmd_spec_mirror },
{ "netrc", &opt.netrc, cmd_boolean },
{ "noclobber", &opt.noclobber, cmd_boolean },
Index: src/http.c
===================================================================
--- src/http.c (revision 2276)
+++ src/http.c (working copy)
@@ -2252,7 +2252,7 @@
retried, and retried, and retried, and... */
uerr_t
http_loop (struct url *u, char **newloc, char **local_file, const char *referer,
- int *dt, struct url *proxy)
+ int *dt, struct url *proxy, bool can_ommit)
{
int count;
bool got_head = false; /* used for time-stamping and filename detection */
@@ -2285,6 +2285,27 @@
if (opt.ftp_glob && has_wildcards_p (u->path))
logputs (LOG_VERBOSE, _("Warning: wildcards not supported in HTTP.\n"));
+ /* Try fetching the document header and checking the document length */
+ if (can_ommit && !opt.spider && !opt.ignore_length &&
+ (opt.minsize > 0 || opt.maxsize > 0))
+ {
+ /* Setup hstat struct. */
+ xzero (hstat);
+ hstat.referer = referer;
+
+ *dt = HEAD_ONLY;
+ err = gethttp (u, &hstat, dt, proxy);
+
+ if (err == RETRFINISHED && hstat.contlen > 0 &&
+ (opt.minsize > 0 && hstat.contlen < opt.minsize * 1024 ||
+ opt.maxsize > 0 && hstat.contlen > opt.maxsize * 1024))
+ {
+ logputs (LOG_VERBOSE, _("File too small or too big -- not retrieving.\n"));
+ ret = FILEBADFILE;
+ goto exit;
+ }
+ }
+
/* Setup hstat struct. */
xzero (hstat);
hstat.referer = referer;
@@ -2300,7 +2321,7 @@
/* Reset the document type. */
*dt = 0;
-
+ /* THE loop */
do
{
Index: src/http.h
===================================================================
--- src/http.h (revision 2276)
+++ src/http.h (working copy)
@@ -32,7 +32,7 @@
struct url;
uerr_t http_loop (struct url *, char **, char **, const char *, int *,
- struct url *);
+ struct url *, bool);
void save_cookies (void);
void http_cleanup (void);
time_t http_atotm (const char *);
Index: src/res.c
===================================================================
--- src/res.c (revision 2276)
+++ src/res.c (working copy)
@@ -545,7 +545,7 @@
*file = NULL;
opt.timestamping = false;
opt.spider = false;
- err = retrieve_url (robots_url, file, NULL, NULL, NULL, false);
+ err = retrieve_url (robots_url, file, NULL, NULL, NULL, false, false);
opt.timestamping = saved_ts_val;
opt.spider = saved_sp_val;
xfree (robots_url);
Index: src/retr.c
===================================================================
--- src/retr.c (revision 2276)
+++ src/retr.c (working copy)
@@ -601,7 +601,7 @@
uerr_t
retrieve_url (const char *origurl, char **file, char **newloc,
- const char *refurl, int *dt, bool recursive)
+ const char *refurl, int *dt, bool recursive, bool can_ommit)
{
uerr_t result;
char *url;
@@ -676,7 +676,7 @@
#endif
|| (proxy_url && proxy_url->scheme == SCHEME_HTTP))
{
- result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url);
+ result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url, can_ommit);
}
else if (u->scheme == SCHEME_FTP)
{
@@ -856,7 +856,7 @@
opt.follow_ftp = old_follow_ftp;
}
else
- status = retrieve_url (cur_url->url->url, &filename, &new_file, NULL, &dt, opt.recursive);
+ status = retrieve_url (cur_url->url->url, &filename, &new_file, NULL, &dt, opt.recursive, false);
if (filename && opt.delete_after && file_exists_p (filename))
{
Index: src/retr.h
===================================================================
--- src/retr.h (revision 2276)
+++ src/retr.h (working copy)
@@ -49,7 +49,7 @@
char *fd_read_hunk (int, hunk_terminator_t, long, long);
char *fd_read_line (int);
-uerr_t retrieve_url (const char *, char **, char **, const char *, int *, bool);
+uerr_t retrieve_url (const char *, char **, char **, const char *, int *, bool, bool);
uerr_t retrieve_from_file (const char *, bool, int *);
const char *retr_rate (wgint, double);
Index: src/recur.c
===================================================================
--- src/recur.c (revision 2276)
+++ src/recur.c (working copy)
@@ -247,7 +247,7 @@
int dt = 0;
char *redirected = NULL;
- status = retrieve_url (url, &file, &redirected, referer, &dt, false);
+ status = retrieve_url (url, &file, &redirected, referer, &dt, false, !html_allowed);
if (html_allowed && file && status == RETROK && (dt & RETROKF) && (dt & TEXTHTML))
Index: src/main.c
===================================================================
--- src/main.c (revision 2276)
+++ src/main.c (working copy)
@@ -189,6 +189,8 @@
{ "level", 'l', OPT_VALUE, "reclevel", -1 },
{ "limit-rate", 0, OPT_VALUE, "limitrate", -1 },
{ "load-cookies", 0, OPT_VALUE, "loadcookies", -1 },
+ { "max-size", 'M', OPT_VALUE, "maxsize", -1 },
+ { "min-size", 's', OPT_VALUE, "minsize", -1 },
{ "mirror", 'm', OPT_BOOLEAN, "mirror", -1 },
{ "no", 'n', OPT__NO, NULL, required_argument },
{ "no-clobber", 0, OPT_BOOLEAN, "noclobber", -1 },
@@ -446,6 +448,10 @@
N_("\
--limit-rate=RATE limit download rate to RATE.\n"),
N_("\
+ -M, --max-size=SIZE limit maximum file size to SIZE (kB).\n"),
+ N_("\
+ -s, --min-size=SIZE limit minimum file size to SIZE (kB).\n"),
+ N_("\
--no-dns-cache disable caching DNS lookups.\n"),
N_("\
--restrict-file-names=OS restrict chars in file names to ones OS allows.\n"),
@@ -675,7 +681,6 @@
stdout);
exit (0);
}
-
#ifndef TESTING
int
main (int argc, char *const *argv)
@@ -979,7 +984,7 @@
opt.follow_ftp = old_follow_ftp;
}
else
- status = retrieve_url (*t, &filename, &redirected_URL, NULL, &dt, opt.recursive);
+ status = retrieve_url (*t, &filename, &redirected_URL, NULL, &dt, opt.recursive, false);
if (opt.delete_after && file_exists_p(filename))
{
Index: doc/wget.texi
===================================================================
--- doc/wget.texi (revision 2276)
+++ doc/wget.texi (working copy)
@@ -1592,7 +1592,7 @@
@item -l @var{depth}
@itemx --level=@var{depth}
Specify recursion maximum depth level @var{depth} (@pxref{Recursive
-Download}). The default maximum depth is 5.
+Download}). The default maximum depth is 5. Zero means infinite recursion.
@cindex proxy filling
@cindex delete after retrieval
@@ -1803,6 +1803,15 @@
Specify the domains that are @emph{not} to be followed.
(@pxref{Spanning Hosts}).
+@cindex file size range
+@item -s @var{size}
+@itemx --min-size=@var{size}
+Limit the minimum size of non-HTML files to @var{size} kB. Smaller files will not be retrieved.
+
+@item -M @var{size}
+@itemx --max-size=@var{size}
+Limit the maximum size of non-HTML files to @var{size} kB. Larger files will not be retrieved.
+
@cindex follow FTP links
@item --follow-ftp
Follow @sc{ftp} links from @sc{html} documents. Without this option,
@@ -3064,6 +3073,14 @@
too.
@item
+Retrieve in directory 'pics' all jpeg images from a given site, excluding
+files smaller than 50k (to avoid thumbnails) or larger than 400k.
+
+@example
+wget -Ppics -nd -r -l0 -Ajpg,jpeg -s50 -M400 http://www.server.com
+@end example
+
+@item
Suppose you were in the middle of downloading, when Wget was interrupted.
Now you do not want to clobber the files already present. It would be:
Index: src/utils.c
===================================================================
--- src/utils.c (revision 2276)
+++ src/utils.c (working copy)
@@ -432,33 +432,52 @@
#endif
}
-/* stat file names named PREFIX.1, PREFIX.2, etc., until one that
- doesn't exist is found. Return a freshly allocated copy of the
- unused file name. */
+/*
+ * Stat file names named PREFIX-1.SUFFIX, PREFIX-2.SUFFIX, etc., until
+ * one that doesn't exist is found. Return a freshly allocated copy of
+ * the unused file name.
+ */
static char *
-unique_name_1 (const char *prefix)
+unique_name_1 (const char *s)
{
int count = 1;
- int plen = strlen (prefix);
- char *template = (char *)alloca (plen + 1 + 24);
- char *template_tail = template + plen;
+ int p, l = strlen (s);
+ char *prefix = (char *) alloca (l + 1);
+ char *suffix = (char *) alloca (l + 1);
+ char *filename = (char *) alloca (l + 26);
+
+ /* Look for last '.' in filename */
+
+ for(p = l; p >= 0 && s[p] != '.'; p--);
- memcpy (template, prefix, plen);
- *template_tail++ = '.';
+ /* If none found, then prefix is the whole filename */
+
+ if (p < 0)
+ p = l;
+ /* Extract prefix and (possibly empty) suffix from filename */
+
+ memcpy (prefix, s, p);
+ prefix[p] = '\0';
+
+ memcpy (suffix, s+p, lp);
+ suffix[lp] = '\0';
+
+ /* Try indexed filenames until an unused one is found */
+
do
- number_to_string (template_tail, count++);
- while (file_exists_p (template));
+ sprintf (filename, "%s-%d%s", prefix, count++, suffix);
+ while (file_exists_p (filename));
- return xstrdup (template);
+ return xstrdup (filename);
}
/* Return a unique file name, based on FILE.
- More precisely, if FILE doesn't exist, it is returned unmodified.
- If not, FILE.1 is tried, then FILE.2, etc. The first FILE.<number>
- file name that doesn't exist is returned.
+ More precisely, if FILE.SUF doesn't exist, it is returned unmodified.
+ If not, FILE-1.SUF is tried, then FILE-2.SUF etc. The first
+ FILE-<number>.SUF file name that doesn't exist is returned.
The resulting file is not created, only verified that it didn't
exist at the point in time when the function was called.
Index: doc/wget.texi
===================================================================
--- doc/wget.texi (revision 2276)
+++ doc/wget.texi (working copy)
@@ -561,16 +561,16 @@
cases, the local file will be @dfn{clobbered}, or overwritten, upon
repeated download. In other cases it will be preserved.
-When running Wget without @samp{-N}, @samp{-nc}, or @samp{-r},
-downloading the same file in the same directory will result in the
-original copy of @var{file} being preserved and the second copy being
-named @samp{@var{file}.1}. If that file is downloaded yet again, the
-third copy will be named @samp{@var{file}.2}, and so on. When
-@samp{-nc} is specified, this behavior is suppressed, and Wget will
-refuse to download newer copies of @samp{@var{file}}. Therefore,
-``@code{no-clobber}'' is actually a misnomer in this mode---it's not
-clobbering that's prevented (as the numeric suffixes were already
-preventing clobbering), but rather the multiple version saving that's
+When running Wget without @samp{-N}, @samp{-nc}, or @samp{-r}, downloading the
+same file in the same directory will result in the original copy of @var{file}
+being preserved and the second copy being named
+@samp{@var{prefix}-1.@var{suffix}}, assuming @var{file} = @var{prefix.suffix}.
+If that file is downloaded yet again, the third copy will be named
+@samp{@var{prefix}-2.@var{suffix}}, and so on. When @samp{-nc} is specified,
+this behavior is suppressed, and Wget will refuse to download newer copies of
+@samp{@var{file}}. Therefore, ``@code{no-clobber}'' is actually a misnomer in
+this mode---it's not clobbering that's prevented (as the numeric suffixes were
+already preventing clobbering), but rather the multiple version saving that's
prevented.
When running Wget with @samp{-r}, but without @samp{-N} or @samp{-nc},
@@ -1592,7 +1592,7 @@
@item -l @var{depth}
@itemx --level=@var{depth}
Specify recursion maximum depth level @var{depth} (@pxref{Recursive
-Download}). The default maximum depth is 5.
+Download}). The default maximum depth is 5. Zero means infinite recursion.
@cindex proxy filling
@cindex delete after retrieval
@@ -1803,6 +1803,15 @@
Specify the domains that are @emph{not} to be followed.
(@pxref{Spanning Hosts}).
+@cindex file size range
+@item -s @var{size}
+@itemx --min-size=@var{size}
+Limit the minimum size of non-HTML files to @var{size} kB. Smaller files will not be retrieved.
+
+@item -M @var{size}
+@itemx --max-size=@var{size}
+Limit the maximum size of non-HTML files to @var{size} kB. Larger files will not be retrieved.
+
@cindex follow FTP links
@item --follow-ftp
Follow @sc{ftp} links from @sc{html} documents. Without this option,
@@ -3064,6 +3073,14 @@
too.
@item
+Retrieve in directory 'pics' all jpeg images from a given site, excluding
+files smaller than 50k (to avoid thumbnails) or larger than 400k.
+
+@example
+wget -Ppics -nd -r -l0 -Ajpg,jpeg -s50 -M400 http://www.server.com
+@end example
+
+@item

2 solutions collected from the web for "Malformed patch error when applying a diff to the wget source"

This is a common problem when diffs are copied and pasted into a text file without the leading space. You need to add a space at the beginning of every line except those starting with the characters '+', '-' and '@@'. To avoid the problem in the first place, it is better to create the diff files yourself (using diff or your version-control system's diff command) and then download the diff file as a whole instead of copying and pasting it from your browser.
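If the file is large, re-adding the space by hand is tedious; a short sed along the lines of that rule can do it. This is only a sketch (name_of_patch.fixed is just an illustrative output name), and you should still eyeball the result — note that the "Index:" and "=====" header lines also get indented, which patch simply skips as leading garbage:

  # Prepend a space to every line that does not already start with
  # '+', '-', '@' or a space, and turn empty lines into a single space
  # so they still count as blank context lines.
  sed -e 's/^[^-+@ ]/ &/' -e 's/^$/ /' name_of_patch > name_of_patch.fixed

  # Verify the result before touching the tree, then apply it.
  patch -p0 --dry-run < name_of_patch.fixed
  patch -p0 < name_of_patch.fixed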

Before applying the patch, I would suggest checking whether it has already made it upstream and is available in a newer/latest/stable release, which would solve your problem cleanly. Even if you only patch a .c file you still have to compile it, so why not do that against a fresh stable tarball if the patch did go upstream.
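As a rough sketch of that check (the release number and URL below are only placeholders — use whatever the current stable tarball is), grab a fresh tarball and grep it for the option before deciding whether the patch is still needed:

  # Hypothetical example: fetch a newer stable wget and look for the
  # max-size/min-size option in it.
  wget https://ftp.gnu.org/gnu/wget/wget-1.11.tar.gz
  tar xzf wget-1.11.tar.gz
  cd wget-1.11
  grep -n 'max-size' src/main.c doc/wget.texi   # empty output: the patch never went upstream
  ./configure && make                           # build from the fresh tree either way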

Seconding Nikhil's comment. However, I'll just point out that you are generally better off downloading the HTML file and then converting it to text, rather than using cut and paste. For example, you can do:

1) wget -c http://osdir.com/ml/web.wget.patches/2007-07/msg00011.html

2) Open the msg00011.html file with OpenOffice, for example, and save it as text. The OO converter does a decent job, and I don't see any obvious problems with the converted patches.
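If you would rather stay on the command line than go through OpenOffice, a text-mode browser can do the same HTML-to-text conversion. This is only an alternative sketch, assuming lynx is installed; depending on how lynx formats the page you may still need to fix indentation or wrapped lines, so compare the output against the hunk headers before running patch:

  wget -c http://osdir.com/ml/web.wget.patches/2007-07/msg00011.html
  # Dump the page as plain text; -nolist drops the link list at the end
  # and -width keeps long patch lines from being wrapped.
  lynx -dump -nolist -width=200 msg00011.html > msg00011.txt
  # Keep only the part from the first "Index:" line onwards.
  sed -n '/Index: src\/options\.h/,$p' msg00011.txt > name_of_patch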
