#!/usr/bin/perl -w
#
# sitescooper - download news from web sites and convert it automatically
# into one of several formats suitable for viewing on a Palm
# handheld.
#
# Sitescooper is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. See the COPYRIGHT section in the POD
# documentation below, or the "doc/gpl.html" file in the distribution,
# for further details.
#
# Skip down to read the POD documentation, or search for "=head1".

#---------------------------------------------------------------------------

# Mac users -- you can pass in command-line arguments, such as "-isilo"
# or "-refresh", by changing this line. These will be prepended to the
# names of any documents you drag and drop onto sitescooper.
# Another way to do this is by creating a .scp config file with the
# CommandLine: parameter in it, and double-clicking it to run sitescooper.
#
$MAC_ARGS = "";

#---------------------------------------------------------------------------

# Print the usage summary and exit (via die).  Called both from the
# command-line parser and from the CmdlineUI object further down.
sub usage {
die <<__ENDOFUSAGE;
Sitescooper - download news from web sites and convert it automatically
into one of several formats suitable for viewing on a Palm handheld.

sitescooper [options] [ [-site sitename] ...]
sitescooper [options] [-sites sitename ...]
sitescooper [options] [-name nm] [-levels n] [-storyurl regexp]
	[-set sitefileparam value] url [...]

Options: [-debug] [-refresh] [-fullrefresh] [-config file]
	[-install dir] [-instapp app] [-dump] [-dumpprc] [-nowrite] [-nodates]
	[-quiet] [-admin cmd] [-stdout-to file] [-keep-tmps] [-fromcache]
	[-noheaders] [-nofooters] [-badcache] [-outputtemplate file.tmpl]
	[-filename template] [-prctitle template] [-parallel] [-disc]
	[-limit numkbytes] [-maxlinks numlinks] [-maxstories numstories]
	[-timeout numsecs]
	[-text | -html | -mhtml | -doc | -plucker | -mplucker | -isilo
	  | -misilo | -misilox | -richreader | -rss | -rss10 | -rss20
	  | -pipe fmt command]
	[-bw | -color] [-maxcolors n]
	[-fixlinks | -keeplinks | -nolinkrewrite]
	[-cvtargs args_for_converter]

file:// and http:// URLs are supported.

Version: $Sitescooper::Main::VERSION
__ENDOFUSAGE
}

use strict;
use FindBin;
use vars qw{ $MAC_ARGS $SLASH };

# Route warnings/deaths through the logging hooks (names resolved at
# call time; they are installed elsewhere in the Sitescooper modules).
$SIG{__WARN__} = 'warn_log';
$SIG{__DIE__} = 'die_log';

BEGIN {
  # This code will track down the directories where sitescooper
  # keeps its modules, portably, so it'll work on Macs, UNIX and Win32.
  # Sadly, we can't rely on File::Spec to do the slash-twiddling for us;
  # it's not included with some versions of MacPerl. :(
  #
  my $bin = $FindBin::Bin;
  my $slash = '/';	# between directories in a path
  my $dirtrailer = '';	# at the end of a directory's path

  if ($^O eq 'MacOS') {
    $slash = ':'; $dirtrailer = ':';
  } elsif ($^O =~ /(win|os2)/) {
    $slash = '\\';
  }

  # first, find the common candidates: "lib" and "site_perl" in
  # the same dir as the script. These are likely on all platforms.
  # Note that we use push for all modules; the sitescooper ones
  # will not be installed, and the LWP ones should use the local
  # copies anyway as some of them (HTML::Parser for example) will
  # be able to use XS code which is faster.
  $_ = $bin.$slash. "lib" . $dirtrailer; push (@INC, $_);
  $_ = $bin.$slash. "site_perl" . $dirtrailer; push (@INC, $_);

  # next, support UNIX-style /usr-based installation, where the
  # script lives in /usr/*/bin and the support files in /usr/*/lib
  # or /usr/*/share. This only happens on UNIX afaik.
  if ($slash eq '/') {
    $_ = $bin . "/../lib/sitescooper";
    if (-d $_) {
      push (@INC, $_); push (@INC, "$_/lib"); push (@INC, "$_/site_perl");
    }
    $_ = $bin . "/../share/sitescooper";
    if (-d $_) {
      push (@INC, $_); push (@INC, "$_/lib"); push (@INC, "$_/site_perl");
    }
  }
}

# CUSTOMISE: you may need to add 'use lib "your_lib_dir";' here
use lib 'lib';

use File::Copy;
use Sitescooper::Main;

# Build the UI object first; the main engine reports through it.
my $ui = new Sitescooper::CmdlineUI();
my $scoop = new Sitescooper::Main($ui);
my $system_config = '';
my $config = '';
$SLASH = $Sitescooper::Main::SLASH;

# Mac support for command-line args.
my @macargs = split (' ', $MAC_ARGS);
unshift (@ARGV, @macargs);
$MAC_ARGS = '';

# parse args, and use the -config argument if set.
$scoop->parse_commandline (@ARGV);
if (defined $scoop->{cf}->{config}) {
  $config = $scoop->{cf}->{config};
}

# once-off: migrate the old tmp files to their new locations.
migrate_user_tmp_files();

# set the paths for state directories and config files.
if (Sitescooper::Main::MyOS() eq 'UNIX') {
  $scoop->set_tmp_dir ("~/.sitescooper");
  $scoop->make_basic_dirs();		# make tmpdir first off

  $system_config = $scoop->get_first_existing_path ("/etc/sitescooper.cf",
	"/etc/sitescooper/sitescooper.cf",
	"/opt/etc/sitescooper.cf",
	"/opt/etc/sitescooper/sitescooper.cf",
	"/usr/local/etc/sitescooper.cf",
	"/usr/local/etc/sitescooper/sitescooper.cf",
	"%S/sitescooper.cf");
  $config ||= $scoop->sed_path ("~/.sitescooper/sitescooper.cf");
  create_home_cf_if_needed($config);

} else {
  $scoop->set_tmp_dir ("%S/tmp");
  $scoop->make_basic_dirs();		# make tmpdir first off
  $system_config = $scoop->sed_path ("%S/sitescooper.cf");
  $config ||= $scoop->sed_path ("%S/sitescooper.cf");
}

# read the configuration. (note: do not use %T until interpret_basic_config() is
# called!)
#
$scoop->verbose ("Reading configuration from \"$config\".");

# Slurp the configuration file and hand its lines to the engine.
# (BUGFIX: previously the file was opened and closed without ever being
# read, so read_config() always received an empty configuration.)
open (my $conf_fh, '<', $config) or die "cannot read \"$config\"\n";
my @conf = <$conf_fh>;
close $conf_fh;

$scoop->read_config ($config, @conf);
$scoop->interpret_basic_config();
$scoop->load_all_modules();

# this has to be here after reading the config; config may change setting
# of the %S escape.
#
if (!defined $scoop->{cf}->{mysitesdir}) {
  if (Sitescooper::Main::MyOS() eq 'UNIX') {
    $scoop->set_my_sites_dir ("sites", "~/sites", "%T/sites");
  } else {
    $scoop->set_my_sites_dir ("sites", "%S/sites", "%T/sites");
  }
}
$scoop->set_shared_sites_dir ("%S/share/site_samples", "%S/site_samples");
$scoop->set_site_choices_file ("%T/site_choices.txt");

# set the interface used to ask for site passwords
#
my $asker = new Sitescooper::ConsolePasswordAsker($scoop);
$scoop->{useragent}->set_password_asker ($asker);

# see where to save PDB files
#
if (!defined $scoop->{cf}->{pilotinstdir} &&
		!defined $scoop->{cf}->{pilotinstapp})
{
  # write PDBs to cwd if nothing is set.
  $scoop->verbose ("Warning: since no PilotInstallDir was specified".
	" in the configuration,\nPDB files will be saved to current directory.\n");
  $scoop->parse_commandline ('-install', $scoop->{cwd});
}

# The scooping run proper: discover sites, read their definitions,
# then download, convert and install.
$scoop->read_site_choices();
$scoop->find_sites_in_sites_dir();
$scoop->read_site_files();
$scoop->read_newshound_profiles();
$scoop->read_commandline_scoops();

$scoop->get_ready_for_run ();
$scoop->run ();
$scoop->finish ();
$ui->cleanexit ();

#---------------------------------------------------------------------------

# One-off migration of state files from the pre-per-user layout
# ("sitescooper_$<" subdirectory) to the current temporary-directory
# layout.  Only runs if the old layout's already_seen.txt marker exists.
sub migrate_user_tmp_files {
  my ($oldtmp, $newtmp, $usertmp);

  if (Sitescooper::Main::MyOS() eq 'UNIX') {
    $oldtmp = $scoop->sed_path ("~/.sitescooper");
    $usertmp = $scoop->sed_path ("~/.sitescooper/sitescooper_$<");
    $newtmp = $scoop->sed_path ("~/.sitescooper");
  } else {
    $oldtmp = $scoop->sed_path ("%t");
    $usertmp = $scoop->sed_path ("%t/sitescooper_$<");
    $newtmp = $scoop->sed_path ("%S/tmp");
  }

  if (-e $usertmp.$SLASH."already_seen.txt") {
    # per-user state files live under the old per-user directory
    for my $item (qw(cookies txt cache already_seen.txt prc sites
			inst.txt site_logins))
    {
      my $old = "$usertmp$SLASH$item";
      my $new = "$newtmp$SLASH$item";
      if ($old ne $new && -e $old) {
	warn "Migrating $old to: $new\n";
	move ($old, $new) or warn "Failed to rename $old to $new\n";
      }
    }

    # site_choices.txt lived at the top of the old tmp dir
    for my $item (qw(site_choices.txt)) {
      my $old = "$oldtmp$SLASH$item";
      my $new = "$newtmp$SLASH$item";
      if ($old ne $new && -e $old) {
	warn "Migrating $old to: $new\n";
	move ($old, $new) or warn "Failed to rename $old to $new\n";
      }
    }
  }
}

# Copy the system-wide configuration to the user's home config path
# ($homecf) if no per-user config exists yet, so the user has a file
# to edit.  Dies if the copy fails.
sub create_home_cf_if_needed {
  my $homecf = shift;

  if (!-e $homecf) {
    warn "Copying default config to \"$homecf\".\n".
	"Edit this if you need to change any configuration settings.\n\n";
    copy ($system_config, $homecf)
	or die "cannot copy $system_config to $homecf\n";
  }
}

# ---------------------------------------------------------------------------
# User interface for command-line use. Override for GUIs, mod_perl use, etc.
package Sitescooper::CmdlineUI;

use Sitescooper::UI;
BEGIN { @Sitescooper::CmdlineUI::ISA = qw(Sitescooper::UI); }

# Constructor; works as a class or instance method.
sub new {
  my ($class) = shift;
  $class = ref($class) || $class;
  my $self = $class->SUPER::new();
  $self;
}

# Print the usage message (and exit, since main::usage() dies).
sub usage {
  my ($self) = @_;
  main::usage();
}

# Fatal error: report to stderr and exit with status 2.
sub scoop_die {
  my ($self, @msg) = @_;
  print STDERR @msg;
  $self->cleanexit(2);
}

# Non-fatal warning: report to stderr and continue.
sub scoop_warn {
  my ($self, @msg) = @_;
  print STDERR @msg;
}

# Debugging output, prefixed so it can be filtered.
sub dbg {
  my ($self, @msg) = @_;
  print STDERR "debug: ",@msg,"\n";
}

# Warning attributed to a particular site file ($fname).
sub sitewarn {
  my ($self, $fname, @msg) = @_;
  warn "SITE WARNING: $fname: ".join('', @msg)."\n";
}

# Progress message to stderr.
sub verbose {
  my ($self, @msg) = @_;
  print STDERR @msg,"\n";
}

# Clear the warn/die log hooks and exit with the given status
# (default 0).  BUGFIX: this used "exit @_;", which evaluates the
# array in scalar context and therefore exits with the *number of
# arguments* (always 0 or 1), not the requested status -- so
# cleanexit(2) exited with status 1.
sub cleanexit {
  my $self = shift;
  $SIG{__WARN__} = '';
  $SIG{__DIE__} = '';
  exit ($_[0] || 0);
}

# ---------------------------------------------------------------------------
# and now the POD:

=head1 NAME

sitescooper - download news from web sites and convert it automatically into
one of several formats suitable for viewing on a Palm handheld.

=head1 SYNOPSIS

sitescooper [options] [ [-site sitename] ...]

sitescooper [options] [-sites sitename ...]

sitescooper [options] [-name nm] [-levels n] [-storyurl regexp]
[-set sitefileparam value] url [...]

Options: [-debug] [-refresh] [-fullrefresh] [-config file]
[-install dir] [-instapp app] [-dump] [-dumpprc] [-nowrite] [-nodates]
[-quiet] [-admin cmd] [-nolinkrewrite] [-stdout-to file] [-badcache]
[-keep-tmps] [-fromcache] [-noheaders] [-nofooters]
[-outputtemplate file.tmpl] [-grep] [-profile file.nhp]
[-profiles file.nhp file2.nhp ...]
[-filename template] [-prctitle template] [-parallel] [-disc]
[-limit numkbytes] [-maxlinks numlinks] [-maxstories numstories]
[-timeout numsecs]
[-text | -html | -mhtml | -doc | -plucker | -mplucker | -isilo
| -misilo | -misilox | -richreader | -pipe fmt command]
[-bw | -color] [-maxcolors n]
[-cvtargs args_for_converter]

=head1 DESCRIPTION

This script, in conjunction with its configuration file and its set of
B files, will download news stories from several top news sites into
text format and/or onto your Palm handheld (with the aid of the B/B
or B utilities).

Alternatively URLs can be supplied on the command line, in which case
those URLs will be downloaded and converted using a reasonable set of
default settings. HTTP and local files, using the C protocol, are both
supported.

Multiple types of sites are supported:

=over 4

1-level sites, where the text to be converted is all present on one page
(such as Slashdot, Linux Weekly News, BluesNews, NTKnow, Ars Technica);

2-level sites, where the text to be converted is linked to from a Table
of Contents page (such as Wired News, BBC News, and I, Cringely);

3-level sites, where the text to be converted is linked to from a Table
of Contents page, which in turn is linked to from a list of issues page
(such as PalmPower).

=back

In addition sites that post news as items on one big page, such as
Slashdot, Ars Technica, and BluesNews, are supported using diff.

Note that at this moment in time, the URLs-on-the-command-line
invocation format does not support 2- or 3-level sites.

The script is portable to most UNIX variants that support perl, as well
as the Win32 platform (tested with ActivePerl 5.00502 build 509).

sitescooper maintains a cache in its temporary directory; files are kept
in this cache for a week at most. Ditto for the text output directory
(set with B in the built-in configuration).
If a password is required for the site, and the current sitescooper session is interactive, the user will be prompted for the username and password. This authentication token will be saved for later use. This way a site that requires login can be set up as a .site -- just log in once, and your password is saved for future non-interactive runs. Note however that the encryption used to hide the password in the sitescooper configuration is pretty transparent; I recommend that rather than using your own username and password to log in to passworded sites, a dedicated, sitescooper account is used instead. =head1 OPTIONS =over 4 =item -refresh Refresh all links -- ignore the F file, do not diff pages, and always fetch links. If a cached page is available, it will be used. =item -fullrefresh Refresh all links -- ignore the F file, do not diff pages, and always fetch links, even if they are available in the cache. =item -config file Read the configuration from B instead of using the built-in one. =item -limit numkbytes Set the limit for output file size to B kilobytes, instead of the default 200K. A limit of 0 means unlimited, any amount of output. =item -maxlinks numlinks Stop retrieving web pages after B have been traversed. This is not used to specify how "deep" a site should be scooped -- it is the number of links followed in total. =item -maxstories numstories Stop retrieving web pages after B stories have been retrieved. =item -timeout numsecs Time out connection attempts after B. The default is 10*60, 10 minutes. =item -install dir The directory to save PDB files to once they've been converted, in order to have them installed to your Palm handheld. =item -instapp app The application to run to install PDB files onto your Palm, once they've been converted. =item -site sitename Limit the run to the site named in the B argument. Normally all available sites will be downloaded. 
To limit the run to 2 or more sites, provide multiple B<-site> arguments like so: -site ntk.site -site tbtf.site =item -sites sitename [...] Limit the run to multiple sites; an easier way to specify multiple sites than using the -site argument for each file. =item -grep Use James Brown's B profile searching code. Any sites that do not contain B will then be searched for the active profiles. Active profiles are loaded from the B specified in the sitescooper configuration file, or specified using the B<-profile> or B<-profiles> arguments. =item -profile file.nhp Limit the run to the site named in the B argument. Normally all available sites will be downloaded. To limit the run to 2 or more sites, provide multiple B<-profile> arguments like so: -profile ntk.site -profile tbtf.site =item -profiles file.nhp [...] Limit the run to multiple sites; an easier way to specify multiple sites than using the -profile argument for each file. =item -name name When specifying a URL on the command-line, this provides the name that should be used when installing the site to the Pilot. It acts exactly the same way as the Name: field in a site file. =item -levels n When specifying a URL on the command-line, this indicates how many levels a site has. Not needed when using .site files. =item -storyurl regexp When specifying a URL on the command-line, this indicates the regular expression which links to stories should conform to. Not needed when using .site files. =item -doc Convert the page(s) downloaded into DOC format, with all the articles listed in full, one after the other. =item -text Convert the page(s) downloaded into plain text format, with all the articles listed in full, one after the other. =item -html Convert the page(s) downloaded into HTML format, on one big page, with a table of contents (taken from the site if possible), followed by all the articles one after another. =item -mhtml Convert the page(s) downloaded into HTML format, but retain the multiple-page format. 
This will create the output in a directory called B; in conjunction with the B<-dump> argument, it will output the path of this directory on standard output before exiting. =item -plucker Convert the page(s) downloaded into Plucker format (see http://plucker.gnu-designs.com/ ), on one big page. The page(s) will be displayed with a table of contents (taken from the site if possible), followed by all the articles one after another. =item -isilo Convert the page(s) downloaded into iSilo format (see http://www.isilo.com/ ), on one big page. This is the default. The page(s) will be displayed with a table of contents (taken from the site if possible), followed by all the articles one after another. =item -misilo Convert the page(s) downloaded into iSilo format (see http://www.isilo.com/ ), with one iSilo document per site, with each story on a separate page. The iSilo document will have a table-of-contents page, taken from the site if possible, with each article on a separate page. =item -misilox Convert the page(s) downloaded into iSilo format (see http://www.isilo.com/ ), with one iSilo document per site, with each story on a separate page. This uses the iSiloXC converter. The iSilo document will have a table-of-contents page, taken from the site if possible, with each article on a separate page. =item -richreader Convert the page(s) downloaded into RichReader format using HTML2Doc.exe (see http://users.erols.com/arenakm/palm/RichReader.html ). The page(s) will be displayed with a table of contents (taken from the site if possible), followed by all the articles one after another. =item -pipe fmt command Convert the page(s) downloaded into an arbitrary format, using the command provided. Sitescooper will still rewrite the page(s) according to the B argument, which should be one of: =over 4 =item text Plain text format. =item html HTML in one big page. =item mhtml HTML in multiple pages. 
=back The command argument can contain C<__SCOOPFILE__>, which will be replaced with the filename of the file containing the rewritten pages in the above format, C<__SYNCFILE__>, which will be replaced with a suitable filename in the Palm synchronization folder, and C<__TITLE__>, which will be replaced by the title of the file (generally a string containing the date and site name). Note that for the B<-mhtml> switch, C<__SCOOPFILE__> will be replaced with the name of the file containing the table-of-contents page. It's up to the conversion utility to follow the href links to the other files in that directory. =item -cvtargs Arguments for the conversion utility. =item -bw Indicate that the target can display only 2-bit images, black and white only. This is generally the default for iSilo and Plucker. =item -color Indicate that the target can display color images. =item -maxcolors n Indicate that the target supports a maximum of 'n' colors. Plucker will display images better on some Palms if this is used. =item -fixlinks Rewrite links to external sites or unscooped pages as underlined text, to differentiate them from links to scooped pages. This is the default behaviour for most formats apart from B<-plucker> or B<-mplucker>. =item -keeplinks Do not rewrite links to external sites or unscooped pages; leave them pointing outside the current scoop. However, links to other pages that are included in the current scoop, are rewritten to point to the scooped pages instead of the source URL. This is the default for Plucker (B<-plucker> or B<-mplucker> arguments). =item -nolinkrewrite Do not rewrite links on scooped documents -- leave them exactly as they are. This includes even links to other scooped pages. See also B<-keeplinks>). =item -dump Output the page(s) downloaded directly to stdout in text or HTML format, instead of writing them to files and converting each one. This option NO LONGER implies B<-text>, like it used to, so to dump text, use B<-dump -text>. 
=item -dumpprc Output the page(s) downloaded directly to stdout, in converted format as a PDB file (note: not PRC format!), suitable for installation to a Palm handheld. =item -nowrite Test mode -- do not write to the cache or already_seen file, instead write what would be written normally to a directory called new_cache and a new_already_seen file. This is very handy when writing a new site file. =item -badcache Send some HTTP headers to bypass web caching proxy servers. This is generally useful if a web caching proxy server somewhere between sitescooper and the target site is returning out-of-date files. =item -debug Enable debugging output. This output is in addition to the usual progress messages. =item -quiet Process sites quietly, without printing the usual progress messages to STDERR. Warnings about incorrect site files and system errors will still be output, however. =item -admin cmd Perform an administrative command. This is intended to ease the task of writing scripts which use sitescooper output. The following admin commands are available: =over 4 =item dump-sites List the sites which would be scooped on a scooping run, and their URLs. Instead of scooping any sites, sitescooper will exit after performing this task. The format is one site per line, with the site file name first, a tab, the site's URL, a tab, the site name, a tab, and the output filename that would be generated without path or extension. For example: S =item journal Write a journal with dumps of the documents as they pass through the formatting and stripping steps of the scooping process. This is written to a file called B in the sitescooper temporary directory. =item import-cookies file Import a Netscape B file into sitescooper, so that certain sites which require them, can use them. For example, the site B requires this. Here's how to import cookies on a UNIX machine: S and on Windows: S Unfortunately, MS Internet Explorer cookies are currently unsupported. 
If you wish to write a patch to support them, that'd be great. =back =item -noheaders Do not attach the sitescooper header (URL, site name, and navigation links) to each page. =item -nofooters Do not attach the sitescooper footer ("copyright retained by original authors" blurb) to each page. =item -outputtemplate file.tmpl Read the output formatting template from the file B. This overrides the settings of the B<-noheaders> and B<-nofooters> flags. See the OUTPUT TEMPLATES section below for details on this. =item -fromcache Do not perform any network access, retrieve everything from the cache or the shared cache. =item -filename template Change the format of output filenames. B