X-Git-Url: http://dxcluster.net/gitweb/gitweb.cgi?a=blobdiff_plain;f=perl%2FGeomag.pm;h=6bc925b8bd3fa2bedd3f2af0f86ea4958a618a9b;hb=b9dffeff7239952814342dad19db3a51def6fab7;hp=05aefeff05cf90ea7dd60441ff340db81561a091;hpb=261c75481017f32ca491df475b36e9600ca430a1;p=spider.git

diff --git a/perl/Geomag.pm b/perl/Geomag.pm
index 05aefeff..6bc925b8 100644
--- a/perl/Geomag.pm
+++ b/perl/Geomag.pm
@@ -16,11 +16,13 @@ use DXLog;
 use Julian;
 use IO::File;
 use DXDebug;
+use DXDupe;
 
 use strict;
+
 
 use vars qw($date $sfi $k $a $r $forecast @allowed @denied $fp $node $from $dirprefix $param
-            %dup $duplth $dupage);
+            $duplth $dupage $filterdef);
 
 $fp = 0;                # the DXLog fcb
 $date = 0;              # the unix time of the WWV (notional)
@@ -33,13 +35,25 @@ $node = "";             # originating node
 $from = "";             # who this came from
 @allowed = ();          # if present only these callsigns are regarded as valid WWV updators
 @denied = ();           # if present ignore any wwv from these callsigns
-%dup = ();              # the spot duplicates hash
 $duplth = 20;           # the length of text to use in the deduping
 $dupage = 12*3600;      # the length of time to hold spot dups
 
 $dirprefix = "$main::data/wwv";
 $param = "$dirprefix/param";
 
+$filterdef = bless ([
+             # tag, sort, field, priv, special parser
+             ['by', 'c', 0],
+             ['origin', 'c', 1],
+             ['channel', 'c', 2],
+             ['by_dxcc', 'nc', 3],
+             ['by_itu', 'ni', 4],
+             ['by_zone', 'nz', 5],
+             ['origin_dxcc', 'nc', 6],
+             ['origin_itu', 'ni', 7],
+             ['origin_zone', 'nz', 8],
+            ], 'Filter::Cmd');
+
 sub init
 {
     $fp = DXLog::new('wwv', 'dat', 'm');
@@ -72,12 +86,13 @@ sub store
 sub update
 {
     my ($mydate, $mytime, $mysfi, $mya, $myk, $myforecast, $myfrom, $mynode, $myr) = @_;
-    if ((@allowed && grep {$_ eq $from} @allowed) ||
-        (@denied && !grep {$_ eq $from} @denied) ||
+    $myfrom =~ s/-\d+$//;
+    if ((@allowed && grep {$_ eq $myfrom} @allowed) ||
+        (@denied && !grep {$_ eq $myfrom} @denied) ||
         (@allowed == 0 && @denied == 0)) {
 
 #       my $trydate = cltounix($mydate, sprintf("%02d18Z", $mytime));
-        if ($mydate >= $date) {
+        if ($mydate > $date) {
             if ($myr) {
                 $r = 0 + $myr;
             } else {
@@ -162,7 +177,7 @@ sub search
 {
     my $from = shift;
     my $to = shift;
-    my @date = $fp->unixtoj(shift);
+    my $date = $fp->unixtoj(shift);
     my $pattern = shift;
     my $search;
     my @out;
@@ -186,7 +201,7 @@ sub search
 
     $fp->close;          # close any open files
 
-    my $fh = $fp->open(@date);
+    my $fh = $fp->open($date);
     for ($count = 0; $count < $to; ) {
         my @in = ();
         if ($fh) {
@@ -230,8 +245,8 @@ sub print_item
 #
 sub readfile
 {
-    my @date = $fp->unixtoj(shift);
-    my $fh = $fp->open(@date);
+    my $date = $fp->unixtoj(shift);
+    my $fh = $fp->open($date);
     my @spots = ();
     my @in;
 
@@ -247,39 +262,18 @@
 # enter the spot for dup checking and return true if it is already a dup
 sub dup
 {
-    my ($d, $sfi, $k, $a, $text) = @_;
+    my ($d, $sfi, $k, $a, $text, $call) = @_;
 
     # dump if too old
     return 2 if $d < $main::systime - $dupage;
 
-#   chomp $text;
-#   $text = substr($text, 0, $duplth) if length $text > $duplth;
-    my $dupkey = "$d|$sfi|$k|$a";
-    return 1 if exists $dup{$dupkey};
-    $dup{$dupkey} = $d;          # in seconds (to the nearest minute)
-    return 0;
-}
-
-# called every hour and cleans out the dup cache
-sub process
-{
-    my $cutoff = $main::systime - $dupage;
-    while (my ($key, $val) = each %dup) {
-        delete $dup{$key} if $val < $cutoff;
-    }
+    my $dupkey = "W$d|$sfi|$k|$a|$call";
+    return DXDupe::check($dupkey, $main::systime+$dupage);
 }
 
 sub listdups
 {
-    my $regex = shift;
-    $regex = '.*' unless $regex;
-    $regex =~ s/[\$\@\%]//g;
-    my @out;
-    for (sort { $dup{$a} <=> $dup{$b} } grep { m{$regex}i } keys %dup) {
-        my $val = $dup{$_};
-        push @out, "$_ = " . cldatetime($val);
-    }
-    return @out;
+    return DXDupe::listdups('W', $dupage, @_);
 }
 1;
 __END__
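The substantive change in this patch is the move from Geomag.pm's private %dup hash, with its own hourly process() sweep, to the shared DXDupe cache: dup() now builds a "W"-prefixed key that includes the reporting callsign and hands it to DXDupe::check() together with an absolute expiry time. Below is a minimal standalone sketch of that pattern. The check() routine here is a stand-in written for illustration with what appears to be the contract the patch relies on; it is not the real DXDupe code, and the sample field values are invented.

#!/usr/bin/perl
use strict;
use warnings;

my %cache;   # dup key => expiry time (unix seconds); stand-in for DXDupe's store

# Stand-in for DXDupe::check as used above: return true if $key is already
# present (and unexpired), otherwise record it with $expires and return false.
sub check {
    my ($key, $expires) = @_;
    return 1 if exists $cache{$key} && $cache{$key} > time;
    $cache{$key} = $expires;
    return 0;
}

# Build a WWV dup key the way the patched Geomag::dup does: a 'W' namespace
# prefix (so WWV keys cannot collide with other users of the shared cache),
# the report fields, and the reporting callsign.
my $dupage = 12*3600;                                     # as in Geomag.pm
my ($d, $sfi, $k, $a, $call) = (time, 69, 2, 8, 'GB7TLH');  # sample values
my $dupkey = "W$d|$sfi|$k|$a|$call";

print check($dupkey, time + $dupage) ? "dup\n" : "new\n";   # new
print check($dupkey, time + $dupage) ? "dup\n" : "new\n";   # dup

One consequence worth noting: because the key now ends in $call, the same WWV report relayed by two different callsigns is no longer treated as a duplicate, whereas the old "$d|$sfi|$k|$a" key would have suppressed the second copy.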