X-Git-Url: http://dxcluster.net/gitweb/gitweb.cgi?a=blobdiff_plain;f=perl%2FSpot.pm;h=afc3410fb6e04045bdf7c774e766c6822a80f190;hb=88665a2bed3b9ec9e97237938a95a045b2a21bb4;hp=5831e9b31d974ec2ad3c9e14d560b3c42fc34e99;hpb=8195bc13ac14b8fbf13d804186680653b5fd8564;p=spider.git

diff --git a/perl/Spot.pm b/perl/Spot.pm
index 5831e9b3..afc3410f 100644
--- a/perl/Spot.pm
+++ b/perl/Spot.pm
@@ -8,28 +8,30 @@ package Spot;
 
-use FileHandle;
+use IO::File;
 use DXVars;
 use DXDebug;
 use DXUtil;
 use DXLog;
 use Julian;
 use Prefix;
-use Carp;
 
 use strict;
-use vars qw($fp $maxspots $defaultspots $maxdays $dirprefix);
+use vars qw($fp $maxspots $defaultspots $maxdays $dirprefix %dup $duplth $dupage);
 
 $fp = undef;
 $maxspots = 50;				# maximum spots to return
 $defaultspots = 10;			# normal number of spots to return
 $maxdays = 35;				# normal maximum no of days to go back
 $dirprefix = "spots";
+%dup = ();					# the spot duplicates hash
+$duplth = 20;				# the length of text to use in the deduping
+$dupage = 3*3600;			# the length of time to hold spot dups
 
 sub init
 {
 	mkdir "$dirprefix", 0777 if !-e "$dirprefix";
-	$fp = DXLog::new($dirprefix, "dat", 'd')
+	$fp = DXLog::new($dirprefix, "dat", 'd');
 }
 
 sub prefix
@@ -41,25 +43,37 @@ sub prefix
 sub add
 {
 	my @spot = @_;				# $freq, $call, $t, $comment, $spotter = @_
+	my @out = @spot[0..4];		# just up to the spotter
 
-	# sure that the numeric things are numeric now (saves time later)
-	$spot[0] = 0 + $spot[0];
-	$spot[2] = 0 + $spot[2];
+	# normalise frequency
+	$spot[0] = sprintf "%.f", $spot[0];
 	
-	# remove ssid if present on spotter
-	$spot[4] =~ s/-\d+$//o;
+	# remove ssids if present on spotter
+	$out[4] =~ s/-\d+$//o;
 
-	# add the 'dxcc' country on the end
-	my @dxcc = Prefix::extract($spot[1]);
-	push @spot, (@dxcc > 0 ) ? $dxcc[1]->dxcc() : 0;
-	
-	my $buf = join("\^", @spot);
+	# remove leading and trailing spaces
+	$spot[3] = unpad($spot[3]);
+	
+	# add the 'dxcc' country on the end for both spotted and spotter, then the cluster call
+	my @dxcc = Prefix::extract($out[1]);
+	my $spotted_dxcc = (@dxcc > 0 ) ? $dxcc[1]->dxcc() : 0;
+	my $spotted_itu = (@dxcc > 0 ) ? $dxcc[1]->itu() : 0;
+	my $spotted_cq = (@dxcc > 0 ) ? $dxcc[1]->cq() : 0;
+	push @out, $spotted_dxcc;
+	@dxcc = Prefix::extract($out[4]);
+	my $spotter_dxcc = (@dxcc > 0 ) ? $dxcc[1]->dxcc() : 0;
+	my $spotter_itu = (@dxcc > 0 ) ? $dxcc[1]->itu() : 0;
+	my $spotter_cq = (@dxcc > 0 ) ? $dxcc[1]->cq() : 0;
+	push @out, $spotter_dxcc;
+	push @out, $spot[5];
+	
+	my $buf = join("\^", @out);
 
 	# compare dates to see whether need to open another save file (remember, redefining $fp 
 	# automagically closes the output file (if any)). 
-	$fp->writeunix($spot[2], $buf);
+	$fp->writeunix($out[2], $buf);
 
-	return $buf;
+	return (@out, $spotted_itu, $spotted_cq, $spotter_itu, $spotter_cq);
 }
 
 # search the spot database for records based on the field no and an expression
@@ -73,7 +87,10 @@ sub add
 # $f2 = date in unix format
 # $f3 = comment
 # $f4 = spotter
-# $f5 = dxcc country
+# $f5 = spotted dxcc country
+# $f6 = spotter dxcc country
+# $f7 = origin
+#
 #
 # In addition you can specify a range of days, this means that it will start searching
 # from days less than today to days less than today
@@ -92,7 +109,7 @@ sub search
 	my $ref;
 	my $i;
 	my $count;
-	my @today = Julian::unixtoj(time);
+	my @today = Julian::unixtoj(time());
 	my @fromdate;
 	my @todate;
 
@@ -127,8 +144,7 @@ sub search
 	
 	$fp->close;					# close any open files
 
-	LOOP:
-	for ($i = 0; $i < $maxdays; ++$i) {	# look thru $maxdays worth of files only
+	for ($i = $count = 0; $i < $maxdays; ++$i) {	# look thru $maxdays worth of files only
 		my @now = Julian::sub(@fromdate, $i);	# but you can pick which $maxdays worth
 		last if Julian::cmp(@now, @todate) <= 0;
 		
@@ -183,4 +199,45 @@ sub readfile
 	}
 	return @spots;
 }
+
+# enter the spot for dup checking and return true if it is already a dup
+sub dup
+{
+	my ($freq, $call, $d, $text) = @_;
+
+	# dump if too old
+	return 2 if $d < $main::systime - $dupage;
+
+	$freq = sprintf "%.1f", $freq;		# normalise frequency
+	$d /= 60;							# to the nearest minute
+	chomp $text;
+	$text = substr($text, 0, $duplth) if length $text > $duplth;
+	my $dupkey = "$freq|$call|$d|$text";
+	return 1 if exists $dup{$dupkey};
+	$dup{$dupkey} = $d * 60;			# in seconds (to the nearest minute)
+	return 0;
+}
+
+# called every hour and cleans out the dup cache
+sub process
+{
+	my $cutoff = $main::systime - $dupage;
+	while (my ($key, $val) = each %dup) {
+		delete $dup{$key} if $val < $cutoff;
+	}
+}
+
+sub listdups
+{
+	my @out;
+	for (sort { $dup{$a} <=> $dup{$b} } keys %dup) {
+		my $val = $dup{$_};
+		push @out, "$_ = $val (" . cldatetime($val) . ")";
+	}
+	return @out;
+}
 1;
+
+
+
+
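
Usage note: a minimal sketch of how the new dedup path fits together, not part of the commit above. It assumes $main::systime is maintained by the cluster main loop and that Spot::init has already run; the caller name handle_incoming_spot is illustrative only. dup() returns 0 for a new spot, 1 for a duplicate and 2 for a spot older than $dupage.

    use strict;
    use Spot;

    # hypothetical caller -- illustrative only, not part of spider
    sub handle_incoming_spot
    {
        my ($freq, $call, $t, $comment, $spotter, $origin) = @_;

        # consult the dup cache first; drop duplicates and over-age
        # spots before writing anything to the spot files
        return if Spot::dup($freq, $call, $t, $comment);

        # add() now returns the extended record: the five spot fields,
        # spotted dxcc, spotter dxcc and origin, followed by the
        # itu/cq zones of both ends
        return Spot::add($freq, $call, $t, $comment, $spotter, $origin);
    }

Spot::process() is expected to run periodically (the comment in the diff says hourly) so that entries older than $dupage are expired from %Spot::dup.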