diff --git a/.gitattributes b/.gitattributes index 35bf617dfe96408c1cca15779f5f066167a3deb1..cce1ab0dd7ec756420f4a98f892c364e6b7ce68f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -752,6 +752,7 @@ CEP/DP3/DPPP/include/DPPP/SourceDBUtil.h -text CEP/DP3/DPPP/include/DPPP/Stokes.h -text CEP/DP3/DPPP/include/DPPP/SubtractMixed.h -text CEP/DP3/DPPP/package.dox -text +CEP/DP3/DPPP/share/HBAdefault -text CEP/DP3/DPPP/share/LBAdefault -text CEP/DP3/DPPP/src/Apply.cc -text CEP/DP3/DPPP/src/BandpassCorrector.cc -text @@ -760,7 +761,6 @@ CEP/DP3/DPPP/src/ComplexMedianFlagger2.cc -text CEP/DP3/DPPP/src/DataBuffer.cc -text CEP/DP3/DPPP/src/DataSquasher.cc -text CEP/DP3/DPPP/src/EstimateMixed.cc -text -CEP/DP3/DPPP/src/EstimateNDPPP.cc -text CEP/DP3/DPPP/src/FrequencyFlagger.cc -text CEP/DP3/DPPP/src/GaussianSource.cc -text CEP/DP3/DPPP/src/IDPPP.cc -text @@ -3554,6 +3554,21 @@ RTCP/IONProc/src/StreamMultiplexer.h -text RTCP/IONProc/src/generateDelays.cc -text RTCP/IONProc/test/CMakeLists.txt -text RTCP/IONProc/test/RTCP.parset -text +RTCP/IONProc/test/newInputSection/CMakeLists.txt -text +RTCP/IONProc/test/newInputSection/OMPThread.h -text +RTCP/IONProc/test/newInputSection/Poll.h -text +RTCP/IONProc/test/newInputSection/Ranges.h -text +RTCP/IONProc/test/newInputSection/SampleBuffer.h -text +RTCP/IONProc/test/newInputSection/SharedMemory.h -text +RTCP/IONProc/test/newInputSection/StationData.h -text +RTCP/IONProc/test/newInputSection/StationID.h -text +RTCP/IONProc/test/newInputSection/StationSettings.h -text +RTCP/IONProc/test/newInputSection/TimeSync.h -text +RTCP/IONProc/test/newInputSection/foo.cc -text +RTCP/IONProc/test/newInputSection/newInputSection.cc -text +RTCP/IONProc/test/newInputSection/newInputSection_old.cc -text +RTCP/IONProc/test/newInputSection/shmtest.cc -text +RTCP/IONProc/test/newInputSection/tRSPTimeStamp.cc -text RTCP/IONProc/test/tDelayCompensation.cc -text RTCP/IONProc/test/tDelayCompensation.parset -text RTCP/IONProc/test/tDelayCompensation.sh -text diff --git a/CEP/DP3/AOFlagger/include/AOFlagger/gui/plot/plotpropertieswindow.h b/CEP/DP3/AOFlagger/include/AOFlagger/gui/plot/plotpropertieswindow.h index c37e9384de10493922be00b9f21ae144577fc4f9..75e5a089017bfa8613585bc4c12db378af2b59d5 100644 --- a/CEP/DP3/AOFlagger/include/AOFlagger/gui/plot/plotpropertieswindow.h +++ b/CEP/DP3/AOFlagger/include/AOFlagger/gui/plot/plotpropertieswindow.h @@ -27,6 +27,7 @@ #include <gtkmm/buttonbox.h> #include <gtkmm/checkbutton.h> #include <gtkmm/entry.h> +#include <gtkmm/image.h> #include <gtkmm/label.h> #include <gtkmm/frame.h> #include <gtkmm/radiobutton.h> diff --git a/CEP/DP3/DPPP/include/DPPP/Demixer.h b/CEP/DP3/DPPP/include/DPPP/Demixer.h index 6fc90536be860b4692c9fc5267affa75a7f3bbbf..25b8258a8d695b16cbd46553d4fd2de653c07650 100644 --- a/CEP/DP3/DPPP/include/DPPP/Demixer.h +++ b/CEP/DP3/DPPP/include/DPPP/Demixer.h @@ -193,7 +193,7 @@ namespace LOFAR { casa::Vector<double> itsFreqDemix; casa::Vector<double> itsFreqSubtr; vector<double> itsUnknowns; - vector<double> itsLastKnowns; + vector<double> itsPrevSolution; uint itsTimeIndex; uint itsNConverged; diff --git a/CEP/DP3/DPPP/share/CMakeLists.txt b/CEP/DP3/DPPP/share/CMakeLists.txt index be25d40163202acd3b44af80392a324916baa639..75e8809b010017d9ebc42db2874d8a08be3703c2 100644 --- a/CEP/DP3/DPPP/share/CMakeLists.txt +++ b/CEP/DP3/DPPP/share/CMakeLists.txt @@ -3,4 +3,5 @@ # Data files install(FILES LBAdefault + HBAdefault DESTINATION share/rfistrategies) diff --git a/CEP/DP3/DPPP/share/HBAdefault b/CEP/DP3/DPPP/share/HBAdefault new 
file mode 100644 index 0000000000000000000000000000000000000000..1f46b4e4ac94497d4d8e41eb071328e5462deb97 --- /dev/null +++ b/CEP/DP3/DPPP/share/HBAdefault @@ -0,0 +1,89 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- This is a Strategy configuration file for the +rfi detector by André Offringa (offringa@astro.rug.nl). +--> +<rfi-strategy format-version="3.7" reader-version-required="3.4"> + <action type="Strategy"> + <children> + <action type="SetFlaggingAction"> + <new-flagging>0</new-flagging> + </action> + <action type="ForEachPolarisationBlock"> + <on-xx>0</on-xx> + <on-xy>1</on-xy> + <on-yx>1</on-yx> + <on-yy>0</on-yy> + <on-stokes-i>0</on-stokes-i> + <on-stokes-q>0</on-stokes-q> + <on-stokes-u>0</on-stokes-u> + <on-stokes-v>0</on-stokes-v> + <children> + <action type="ForEachComplexComponentAction"> + <on-amplitude>1</on-amplitude> + <on-phase>0</on-phase> + <on-real>0</on-real> + <on-imaginary>0</on-imaginary> + <restore-from-amplitude>0</restore-from-amplitude> + <children> + <action type="IterationBlock"> + <iteration-count>2</iteration-count> + <sensitivity-start>4</sensitivity-start> + <children> + <action type="SumThresholdAction"> + <base-sensitivity>1</base-sensitivity> + <time-direction-flagging>1</time-direction-flagging> + <frequency-direction-flagging>1</frequency-direction-flagging> + </action> + <action type="CombineFlagResults"> + <children> + <action type="FrequencySelectionAction"> + <threshold>3</threshold> + </action> + <action type="TimeSelectionAction"> + <threshold>3.5</threshold> + </action> + </children> + </action> + <action type="SetImageAction"> + <new-image>1</new-image> + </action> + <action type="ChangeResolutionAction"> + <time-decrease-factor>3</time-decrease-factor> + <frequency-decrease-factor>3</frequency-decrease-factor> + <restore-revised>1</restore-revised> + <restore-masks>0</restore-masks> + <children> + <action type="HighPassFilterAction"> + <horizontal-kernel-sigma-sq>2.5</horizontal-kernel-sigma-sq> + <vertical-kernel-sigma-sq>5</vertical-kernel-sigma-sq> + <window-width>21</window-width> + <window-height>31</window-height> + <mode>1</mode> + </action> + </children> + </action> + </children> + </action> + <action type="SumThresholdAction"> + <base-sensitivity>1</base-sensitivity> + <time-direction-flagging>1</time-direction-flagging> + <frequency-direction-flagging>1</frequency-direction-flagging> + </action> + </children> + </action> + </children> + </action> + <action type="SetFlaggingAction"> + <new-flagging>4</new-flagging> + </action> + <action type="StatisticalFlagAction"> + <enlarge-frequency-size>0</enlarge-frequency-size> + <enlarge-time-size>0</enlarge-time-size> + <max-contaminated-frequencies-ratio>0.5</max-contaminated-frequencies-ratio> + <max-contaminated-times-ratio>0.5</max-contaminated-times-ratio> + <minimum-good-frequency-ratio>0.2</minimum-good-frequency-ratio> + <minimum-good-time-ratio>0.2</minimum-good-time-ratio> + </action> + </children> + </action> +</rfi-strategy> diff --git a/CEP/DP3/DPPP/share/LBAdefault b/CEP/DP3/DPPP/share/LBAdefault index 200dc2f961748cd3b7ea90b75ae4afc4a5f3c0c5..0a7ba38f0f5848c66cf7930e464b68c4128f75ed 100644 --- a/CEP/DP3/DPPP/share/LBAdefault +++ b/CEP/DP3/DPPP/share/LBAdefault @@ -1,7 +1,6 @@ <?xml version="1.0" encoding="UTF-8"?> <!-- This is a Strategy configuration file for the rfi detector by André Offringa (offringa@astro.rug.nl). -It is the default strategy for LBA observations.
--> <rfi-strategy format-version="3.7" reader-version-required="3.4"> <action type="Strategy"> @@ -32,8 +31,8 @@ It is the default strategy for LBA observations. <children> <action type="SumThresholdAction"> <base-sensitivity>1</base-sensitivity> - <time-direction-flagging>1</time-direction-flagging> - <frequency-direction-flagging>0</frequency-direction-flagging> + <time-direction-flagging>0</time-direction-flagging> + <frequency-direction-flagging>1</frequency-direction-flagging> </action> <action type="CombineFlagResults"> <children> @@ -67,8 +66,8 @@ It is the default strategy for LBA observations. </action> <action type="SumThresholdAction"> <base-sensitivity>1</base-sensitivity> - <time-direction-flagging>1</time-direction-flagging> - <frequency-direction-flagging>0</frequency-direction-flagging> + <time-direction-flagging>0</time-direction-flagging> + <frequency-direction-flagging>1</frequency-direction-flagging> </action> </children> </action> @@ -114,4 +113,4 @@ It is the default strategy for LBA observations. </action> </children> </action> -</rfi-strategy> +</rfi-strategy> \ No newline at end of file diff --git a/CEP/DP3/DPPP/src/DPInfo.cc b/CEP/DP3/DPPP/src/DPInfo.cc index 3901be35654abb9bd7eae13c23d688d07156e9c5..d1b9496ef08586af93bba2e299d680ade867d503 100644 --- a/CEP/DP3/DPPP/src/DPInfo.cc +++ b/CEP/DP3/DPPP/src/DPInfo.cc @@ -129,9 +129,9 @@ namespace LOFAR { itsAntUsed.reserve (itsAntNames.size()); for (uint i=0; i<itsAntMap.size(); ++i) { if (itsAntMap[i] == 0) { - itsAntMap[i] = itsAntUsed.size(); + itsAntMap[i] = itsAntUsed.size(); itsAntUsed.push_back (i); - } + } } } diff --git a/CEP/DP3/DPPP/src/Demixer.cc b/CEP/DP3/DPPP/src/Demixer.cc index 390664d89184392a0ceae60b6ca7380768e1ba74..4e78027e9c484b90be7cf63baebbffc148175b4b 100644 --- a/CEP/DP3/DPPP/src/Demixer.cc +++ b/CEP/DP3/DPPP/src/Demixer.cc @@ -268,18 +268,26 @@ namespace LOFAR { // Handle possible data selection. itsFilter.setInfo (infoIn); const DPInfo& infoSel = itsFilter.getInfo(); + // NB. The number of baselines and stations refer to the number of + // selected baselines and the number of unique stations that participate + // in the selected baselines. itsNBl = infoSel.nbaselines(); + itsNStation = infoSel.antennaUsed().size(); + + // Re-number the station IDs in the selected baselines, removing gaps in + // the numbering due to unused stations. + const vector<int> &antennaMap = infoSel.antennaMap(); + for (uint i=0; i<itsNBl; ++i) { + itsBaselines.push_back(Baseline(antennaMap[infoSel.getAnt1()[i]], + antennaMap[infoSel.getAnt2()[i]])); + } + + // Allocate buffers used to compute the smearing factors. itsFactorBuf.resize (IPosition(4, itsNCorr, itsNChanIn, itsNBl, itsNDir*(itsNDir-1)/2)); itsFactorBufSubtr.resize (IPosition(4, itsNCorr, itsNChanIn, itsNBl, itsNDir*(itsNDir-1)/2)); - for (uint i=0; i<itsNBl; ++i) { - itsBaselines.push_back (Baseline(infoSel.getAnt1()[i], - infoSel.getAnt2()[i])); - } - // Get nr of stations actually used. - ///itsNStation = infoSel.antennaUsed().size(); - itsNStation = infoSel.antennaNames().size(); + // Adapt averaging to available nr of channels and times. // Use a copy of the DPInfo, otherwise it is updated multiple times. DPInfo infoDemix(infoSel); @@ -321,9 +329,9 @@ namespace LOFAR { // Intialize the unknowns. 
itsUnknowns.resize(itsNTimeDemix * itsNModel * itsNStation * 8); - itsLastKnowns.resize(itsNModel * itsNStation * 8); - vector<double>::iterator it = itsLastKnowns.begin(); - vector<double>::iterator it_end = itsLastKnowns.end(); + itsPrevSolution.resize(itsNModel * itsNStation * 8); + vector<double>::iterator it = itsPrevSolution.begin(); + vector<double>::iterator it_end = itsPrevSolution.end(); while(it != it_end) { *it++ = 1.0; @@ -344,11 +352,11 @@ namespace LOFAR { os << " instrumentmodel: " << itsInstrumentName << std::endl; itsSelBL.show (os); if (itsSelBL.hasSelection()) { - os << " demixing " << itsFilter.getInfo().nbaselines() - << " out of " << getInfo().nbaselines() << " baselines (" - << itsFilter.getInfo().antennaUsed().size() - << " out of " << getInfo().antennaUsed().size() - << " stations)" << std::endl; + os << " demixing " << itsFilter.getInfo().nbaselines() + << " out of " << getInfo().nbaselines() << " baselines (" + << itsFilter.getInfo().antennaUsed().size() + << " out of " << getInfo().antennaUsed().size() + << " stations)" << std::endl; } os << " targetsource: " << itsTargetSource << std::endl; os << " subtractsources: " << itsSubtrSources << std::endl; @@ -463,51 +471,12 @@ namespace LOFAR { // Estimate gains and subtract source contributions when sufficient time // slots have been collected. if (itsNTimeOut == itsNTimeChunk) { - handleDemix(); + handleDemix(); } itsTimer.stop(); return true; } - void Demixer::handleDemix() - { - if(itsNModel > 0) { - itsTimerSolve.start(); - demix(); - itsTimerSolve.stop(); - // If selection was done, merge the subtract results back into the - // buffer. - if (itsSelBL.hasSelection()) { - mergeSubtractResult(); - } - } - - // Clear the input buffers. - for (size_t i=0; i<itsAvgResults.size(); ++i) { - itsAvgResults[i]->clear(); - } - // Let the next step process the data. - for (uint i=0; i<itsNTimeOutSubtr; ++i) { - itsTimer.stop(); - if (itsSelBL.hasSelection()) { - getNextStep()->process (itsAvgResultFull->get()[i]); - } else { - getNextStep()->process (itsAvgResultSubtr->get()[i]); - } - itsTimer.start(); - } - - // Clear the output buffer. - itsAvgResultFull->clear(); - itsAvgResultSubtr->clear(); - - // Reset counters. - itsNTimeIn = 0; - itsNTimeOut = 0; - itsNTimeOutSubtr = 0; - itsTimeIndex += itsNTimeChunk; - } - void Demixer::finish() { itsTimer.start(); @@ -544,7 +513,7 @@ namespace LOFAR { itsFactorsSubtr.resize(itsNTimeOutSubtr); // Demix the source directions. - handleDemix(); + handleDemix(); } // Write solutions to disk in ParmDB format. @@ -558,6 +527,45 @@ namespace LOFAR { getNextStep()->finish(); } + void Demixer::handleDemix() + { + if(itsNModel > 0) { + itsTimerSolve.start(); + demix(); + itsTimerSolve.stop(); + // If selection was done, merge the subtract results back into the + // buffer. + if (itsSelBL.hasSelection()) { + mergeSubtractResult(); + } + } + + // Clear the input buffers. + for (size_t i=0; i<itsAvgResults.size(); ++i) { + itsAvgResults[i]->clear(); + } + // Let the next step process the data. + for (uint i=0; i<itsNTimeOutSubtr; ++i) { + itsTimer.stop(); + if (itsSelBL.hasSelection()) { + getNextStep()->process (itsAvgResultFull->get()[i]); + } else { + getNextStep()->process (itsAvgResultSubtr->get()[i]); + } + itsTimer.start(); + } + + // Clear the output buffer. + itsAvgResultFull->clear(); + itsAvgResultSubtr->clear(); + + // Reset counters. 
+ itsNTimeIn = 0; + itsNTimeOut = 0; + itsNTimeOutSubtr = 0; + itsTimeIndex += itsNTimeChunk; + } + void Demixer::mergeSubtractResult() { // Merge the selected baselines from the subtract buffer into the @@ -824,24 +832,20 @@ namespace LOFAR { const size_t nCr = 4; const size_t nSamples = nBl * nCh * nCr; - ///for (uint j=0; j<nTimeSubtr; ++j) { - ///cout << itsAvgResultSubtr->get()[j].getData()<<endl; - /// cout << itsAvgResultSubtr->get()[j].getFlags()<<endl; - /// cout << itsAvgResultSubtr->get()[j].getWeights()<<endl; - /// cout << itsAvgResultSubtr->get()[j].getUVW()<<endl; - ///} - ///for (uint i=0; i<itsFactors.size(); ++i) { - /// cout << itsFactors[i] << endl; - ///} - ///for (uint i=0; i<itsFactorsSubtr.size(); ++i) { - /// cout << itsFactorsSubtr[i] << endl; - ///} - vector<ThreadPrivateStorage> threadStorage(nThread); for(vector<ThreadPrivateStorage>::iterator it = threadStorage.begin(), end = threadStorage.end(); it != end; ++it) { initThreadPrivateStorage(*it, nDr, nSt, nBl, nCh, nChSubtr); + + // Copy the previous solution to the thread private vectors of unknowns. + // When solution propagation is disabled, itsPrevSolution is never + // updated. It then contains 1.0+0.0i for the diagonal terms and + // 0.0+0.0i for the off-diagonal terms. Thus, when solution propagation + // is disabled this statement effectively re-initializes the thread + // private vectors of unknowns. + copy(itsPrevSolution.begin(), itsPrevSolution.end(), + it->unknowns.begin()); } const_cursor<double> cr_freq = casa_const_cursor(itsFreqDemix); @@ -853,11 +857,14 @@ namespace LOFAR { { const size_t thread = OpenMP::threadNum(); ThreadPrivateStorage &storage = threadStorage[thread]; - // Copy solutions from global solution array to thread private solution - // array (solution propagation between chunks). - if (!itsPropagateSolutions || ts==0) { - copy(itsLastKnowns.begin(), itsLastKnowns.end(), storage.unknowns.begin()); - } + + // If solution propagation is disabled, re-initialize the thread-private + // vector of unknowns. + if(!itsPropagateSolutions) + { + copy(itsPrevSolution.begin(), itsPrevSolution.end(), + storage.unknowns.begin()); + } // Simulate. // @@ -1009,7 +1016,7 @@ namespace LOFAR { { copy(&(itsUnknowns[(itsTimeIndex + nTime - 1) * nDr * nSt * 8]), &(itsUnknowns[(itsTimeIndex + nTime) * nDr * nSt * 8]), - itsLastKnowns.begin()); + itsPrevSolution.begin()); } // Update convergence count. @@ -1041,30 +1048,40 @@ namespace LOFAR { resolution[1] = itsTimeIntervalAvg; parmDB.setDefaultSteps(resolution); + // Map station indices in the solution array to the corresponding antenna + // names. This is required because solutions are only produced for + // stations that participate in one or more baselines. Due to the baseline + // selection or missing baselines, solutions may be available for less + // than the total number of station available in the observation. 
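The comment above, and the station re-numbering introduced earlier in this patch (DPInfo.cc and Demixer::updateInfo), both rely on a gap-free station numbering: unknowns and solutions are allocated only for stations that actually occur in the selected baselines. A minimal Python sketch of the same idea, with hypothetical names rather than the actual C++ code:

    def renumber_stations(ant1, ant2):
        # original antenna index -> gap-free index, plus the inverse list
        # (cf. antennaMap / antennaUsed in DPInfo)
        ant_map, ant_used = {}, []
        for a in list(ant1) + list(ant2):
            if a not in ant_map:
                ant_map[a] = len(ant_used)
                ant_used.append(a)
        # baselines expressed in the gap-free numbering
        baselines = [(ant_map[a], ant_map[b]) for a, b in zip(ant1, ant2)]
        return ant_map, ant_used, baselines

    # Stations 1 and 3 occur in no selected baseline, so they get no solution slots.
    ant_map, ant_used, baselines = renumber_stations([0, 2, 4], [2, 4, 5])
    # ant_used == [0, 2, 4, 5]; baselines == [(0, 1), (1, 2), (2, 3)]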
+ const DPInfo &info = itsFilter.getInfo(); + const vector<int> &antennaUsed = info.antennaUsed(); + const Vector<String> &antennaNames = info.antennaNames(); + vector<BBS::Parm> parms; for(size_t dr = 0; dr < itsNModel; ++dr) { - for(size_t st = 0; st < getInfo().antennaNames().size(); ++st) { - string suffix (string(getInfo().antennaNames()[st]) + ':' + - itsAllSources[dr]); - parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, - "DirectionalGain:0:0:Real:" + suffix))); - parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, - "DirectionalGain:0:0:Imag:" + suffix))); - - parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, - "DirectionalGain:0:1:Real:" + suffix))); - parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, - "DirectionalGain:0:1:Imag:" + suffix))); - - parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, - "DirectionalGain:1:0:Real:" + suffix))); - parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, - "DirectionalGain:1:0:Imag:" + suffix))); - - parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, - "DirectionalGain:1:1:Real:" + suffix))); - parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, - "DirectionalGain:1:1:Imag:" + suffix))); + for(size_t st = 0; st < itsNStation; ++st) { + string name(antennaNames[antennaUsed[st]]); + string suffix(name + ":" + itsAllSources[dr]); + + parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, + "DirectionalGain:0:0:Real:" + suffix))); + parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, + "DirectionalGain:0:0:Imag:" + suffix))); + + parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, + "DirectionalGain:0:1:Real:" + suffix))); + parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, + "DirectionalGain:0:1:Imag:" + suffix))); + + parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, + "DirectionalGain:1:0:Real:" + suffix))); + parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, + "DirectionalGain:1:0:Imag:" + suffix))); + + parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, + "DirectionalGain:1:1:Real:" + suffix))); + parms.push_back(BBS::Parm(parmCache, parmSet.addParm(parmDB, + "DirectionalGain:1:1:Imag:" + suffix))); } } diff --git a/CEP/DP3/DPPP/src/EstimateNDPPP.cc b/CEP/DP3/DPPP/src/EstimateNDPPP.cc deleted file mode 100644 index a2cad2d85462ba8cbc1df02b2e797b3782248ac1..0000000000000000000000000000000000000000 --- a/CEP/DP3/DPPP/src/EstimateNDPPP.cc +++ /dev/null @@ -1,860 +0,0 @@ -//# EstimateNDPPP.cc: NDPPP specific variant of BBS estimation routines. -//# -//# Copyright (C) 2012 -//# ASTRON (Netherlands Institute for Radio Astronomy) -//# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands -//# -//# This file is part of the LOFAR software suite. -//# The LOFAR software suite is free software: you can redistribute it and/or -//# modify it under the terms of the GNU General Public License as published -//# by the Free Software Foundation, either version 3 of the License, or -//# (at your option) any later version. -//# -//# The LOFAR software suite is distributed in the hope that it will be useful, -//# but WITHOUT ANY WARRANTY; without even the implied warranty of -//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -//# GNU General Public License for more details. -//# -//# You should have received a copy of the GNU General Public License along -//# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. 
-//# -//# $Id$ - -#include <lofar_config.h> -#include <DPPP/EstimateNDPPP.h> -#include <BBSKernel/EstimateUtil.h> - -namespace LOFAR -{ -namespace DPPP -{ -using namespace BBS; - -namespace -{ - // State kept for a single cell in the solution grid. - struct Cell - { - // LSQ solver and current estimates for the coefficients. - casa::LSQFit solver; - vector<double> coeff; - }; - - template <typename T_SAMPLE_MODIFIER> - struct SampleProcessorComplex; - - JonesMatrix mix(const vector<JonesMatrix> &in, - const vector<casa::Array<casa::DComplex> > &coeff, - unsigned int target, - unsigned int nsources, - unsigned int baseline); - - void mix(const vector<JonesMatrix> &in, vector<JonesMatrix> &out, - const vector<casa::Array<casa::DComplex> > &coeff, - const Location &start, const Location &end, unsigned int baseline); - - FlagArray mergeFlags(const vector<JonesMatrix> &in); - void mixAndMerge(const Matrix &factor, const Element &in, Element &out); - - Matrix makeMixingFactor(const vector<casa::Array<casa::DComplex> > &coeff, - const Location &start, const Location &end, unsigned int baseline, - unsigned int correlation, unsigned int row, unsigned int column); - - void subtract2(vector<DPPP::DPBuffer> &buffer, - const vector<MeasurementExpr::Ptr> &models, - const vector<casa::Array<casa::DComplex> > &coeff, - unsigned int target, - unsigned int nsources, - const vector<pair<size_t, size_t> > &baselineMap, - const vector<pair<size_t, size_t> > &correlationMap); - - template <typename T_SAMPLE_PROCESSOR> - void equate(const Location &start, const Location &end, - const vector<vector<DPPP::DPBuffer> > &buffers, - const vector<MeasurementExpr::Ptr> &models, - const vector<casa::Array<casa::DComplex> > &coeff, - const vector<pair<size_t, size_t> > &baselineMap, - const vector<pair<size_t, size_t> > &correlationMap, - const vector<Interval<size_t> > (&cellMap)[2], - const map<PValueKey, unsigned int> &coeffMap, - vector<Cell> &cells); - - template <typename T_SAMPLE_PROCESSOR> - void equate2(const Location &start, const Location &end, size_t blIndex, - const vector<DPPP::DPBuffer> &obs, const JonesMatrix &sim, - const vector<pair<size_t, size_t> > &correlationMap, - const vector<Interval<size_t> > (&cellMap)[2], - const map<PValueKey, unsigned int> &coeffMap, - vector<Cell> &cells); - - void initCells(const Location &start, const Location &end, - const ParmGroup &solvables, size_t nCoeff, - const EstimateOptions &options, - vector<Cell> &cells); - - // Perform a single iteration for all cells in the range [\p start, \p end) - // that have not yet converged or failed, updating the coefficient values - // to the new estimates found. - // \pre The range starting at \p cell should contain exactly one Cell - // instance for each cell in the range [\p start, \p end]. 
- bool iterate(const Location &start, const Location &end, - const ParmGroup &solvables, const EstimateOptions &options, - vector<Cell> &cells); -} // unnamed namespace - -void estimate(const vector<vector<DPPP::DPBuffer> > &buffers, - const vector<MeasurementExpr::Ptr> &models, - const vector<casa::Array<dcomplex> > &coeff, - const BaselineSeq &baselines, - const CorrelationSeq &correlations, - const BaselineMask &baselineMask, - const CorrelationMask &correlationMask, - const Grid &visGrid, - const Grid &solGrid, - const EstimateOptions &options) -{ - // ========================================================================= - // CHECK PRECONDITIONS - // ========================================================================= - const size_t nDirections = buffers.size(); - const size_t nModels = models.size(); - { - ASSERT(nDirections >= nModels && nModels > 0); - ASSERT(int(nDirections) == coeff[0].shape()[0]); - ASSERT(int(nModels) == coeff[0].shape()[1]); - - CorrelationSeq tmp; - tmp.append(Correlation::XX); - tmp.append(Correlation::XY); - tmp.append(Correlation::YX); - tmp.append(Correlation::YY); - - for(size_t i = 0; i < nModels; ++i) - { - ASSERT(models[i]->baselines() == models.front()->baselines()); - ASSERT(models[i]->correlations() == tmp); - ASSERT(models[i]->domain().contains(visGrid.getBoundingBox())); - } - } - // ========================================================================= - - // Construct a sequence of pairs of indices of matching baselines (i.e. - // baselines common to both buffer and model). - vector<pair<size_t, size_t> > blMap; - makeIndexMap(baselines, models.front()->baselines(), baselineMask, - back_inserter(blMap)); - - // Construct a sequence of pairs of indices of matching correlations - // (i.e. correlations known by both buffer and model). - vector<pair<size_t, size_t> > crMap; - makeIndexMap(correlations, models.front()->correlations(), correlationMask, - back_inserter(crMap)); - - // Compute a mapping from cells of the solution grid to cell intervals - // in the evaluation grid. - vector<Interval<size_t> > cellMap[2]; - Interval<size_t> domain[2]; - domain[FREQ] = makeAxisMap(solGrid[FREQ], visGrid[FREQ], - back_inserter(cellMap[FREQ])); - domain[TIME] = makeAxisMap(solGrid[TIME], visGrid[TIME], - back_inserter(cellMap[TIME])); - - ParmGroup solvables; - for(size_t i = 0; i < nModels; ++i) - { - ParmGroup tmp = models[i]->solvables(); - solvables.insert(tmp.begin(), tmp.end()); - } - - // Make coefficient map. - map<PValueKey, unsigned int> coeffMap; - makeCoeffMap(solvables, inserter(coeffMap, coeffMap.begin())); - LOG_DEBUG_STR("No. of coefficients to estimate: " << coeffMap.size()); - - // Assign solution grid to solvables. - ParmManager::instance().setGrid(solGrid, solvables); - - // --------------------------------------------------------------------- - // Process each chunk of cells in a loop. - // --------------------------------------------------------------------- - - // Clip chunk size to the size of the solution grid. - size_t chunkSize = options.chunkSize() == 0 ? solGrid[TIME]->size() - : std::min(options.chunkSize(), solGrid[TIME]->size()); - - // Allocate cells. - vector<Cell> cells(solGrid[FREQ]->size() * chunkSize); - - // Compute the number of cell chunks to process. - size_t nChunks = (solGrid[TIME]->size() + chunkSize - 1) / chunkSize; - - // Process the solution grid in chunks. 
- for(size_t chunk = 0; chunk < nChunks; ++chunk) - { - NSTimer timerChunk, timerEquate, timerIterate; - timerChunk.start(); - - // Compute cell chunk boundaries in solution grid coordinates. - Location chunkStart(0, chunk * chunkSize); - Location chunkEnd(solGrid[FREQ]->size() - 1, - std::min(chunkStart.second + chunkSize - 1, - solGrid[TIME]->size() - 1)); - - // Adjust cell chunk boundaries to exclude those cells for which no - // visibility data is available. - chunkStart = - Location(std::max(chunkStart.first, domain[FREQ].start), - std::max(chunkStart.second, domain[TIME].start)); - chunkEnd = - Location(std::min(chunkEnd.first, domain[FREQ].end), - std::min(chunkEnd.second, domain[TIME].end)); - - // If there are no cells for which visibility data is available, - // skip the chunk. - if(chunkStart.first > chunkEnd.first - || chunkStart.second > chunkEnd.second) - { - timerChunk.stop(); - continue; - } - - // Ensure a model value is computed for all the visibility samples - // within the chunk. - Location reqStart(cellMap[FREQ][chunkStart.first].start, - cellMap[TIME][chunkStart.second].start); - Location reqEnd(cellMap[FREQ][chunkEnd.first].end, - cellMap[TIME][chunkEnd.second].end); - for(size_t i = 0; i < nModels; ++i) - { - models[i]->setEvalGrid(visGrid.subset(reqStart, reqEnd)); - } - - // Initialize a cell instance for each cell in [chunkEnd, - // chunkStart]. - initCells(chunkStart, chunkEnd, solvables, coeffMap.size(), options, - cells); - - typedef SampleProcessorComplex<SampleModifierComplex> SampleProcessor; - - bool done = false; - unsigned int nIterations = 0; - while(!done) - { - // Construct normal equations from the data and an evaluation of - // the model based on the current coefficient values. - timerEquate.start(); - equate<SampleProcessor>(chunkStart, chunkEnd, buffers, models, - coeff, blMap, crMap, cellMap, coeffMap, cells); - timerEquate.stop(); - - // Perform a single iteration. - timerIterate.start(); - done = iterate(chunkStart, chunkEnd, solvables, options, cells); - timerIterate.stop(); - - // Notify model that solvables have changed. - for(size_t i = 0; i < nModels; ++i) - { - models[i]->solvablesChanged(); - } - - // Update iteration count. - ++nIterations; - } - timerChunk.stop(); - - // Output statistics and timers. - const size_t nCells = (chunkEnd.second - chunkStart.second + 1) - * (chunkEnd.first - chunkStart.first + 1); - LOG_DEBUG_STR("chunk: " << (chunk + 1) << "/" << nChunks - << " cells: " << nCells << " iterations: " << nIterations); - LOG_DEBUG_STR("\ttimers: all: " << toString(timerChunk) - << " equate: " << toString(timerEquate) << " iterate: " - << toString(timerIterate) << " total/count/average"); - - // Propagate solutions to the next chunk if required. 
- if(options.propagate() && (chunk + 1) < nChunks) - { - Location srcStart(0, chunk * chunkSize); - Location srcEnd(solGrid[FREQ]->size() - 1, - srcStart.second + chunkSize - 1); - - Location destStart(0, (chunk + 1) * chunkSize); - Location destEnd(solGrid[FREQ]->size() - 1, - std::min(destStart.second + chunkSize - 1, - solGrid[TIME]->size() - 1)); - - passCoeff(solvables, srcStart, srcEnd, destStart, destEnd); - } - } -} - -void subtract(vector<DPPP::DPBuffer> &buffer, - const vector<BBS::MeasurementExpr::Ptr> &models, - const vector<casa::Array<casa::DComplex> > &coeff, - const BBS::BaselineSeq &baselines, - const BBS::CorrelationSeq &correlations, - const BBS::BaselineMask &baselineMask, - const BBS::CorrelationMask &correlationMask, - const BBS::Grid &visGrid, - unsigned int target, - unsigned int nsources) -{ - // ========================================================================= - // CHECK PRECONDITIONS - // ========================================================================= - { - ASSERT(nsources <= models.size()); - ASSERT(buffer.size() > 0); - ASSERT(int(target) < coeff[0].shape()[1]); - - CorrelationSeq tmp; - tmp.append(Correlation::XX); - tmp.append(Correlation::XY); - tmp.append(Correlation::YX); - tmp.append(Correlation::YY); - - for(size_t i = 0; i < nsources; ++i) - { - ASSERT(models[i]->baselines() == models.front()->baselines()); - ASSERT(models[i]->correlations() == tmp); - ASSERT(models[i]->domain().contains(visGrid.getBoundingBox())); - } - } - // ========================================================================= - - // Construct a sequence of pairs of indices of matching baselines (i.e. - // baselines common to both buffer and model). - vector<pair<size_t, size_t> > blMap; - makeIndexMap(baselines, models.front()->baselines(), baselineMask, - back_inserter(blMap)); - - // Construct a sequence of pairs of indices of matching correlations - // (i.e. correlations known by both buffer and model). - vector<pair<size_t, size_t> > crMap; - makeIndexMap(correlations, models.front()->correlations(), correlationMask, - back_inserter(crMap)); - - for(size_t i = 0; i < nsources; ++i) - { - models[i]->setEvalGrid(visGrid); - } - - subtract2(buffer, models, coeff, target, nsources, blMap, crMap); -} - - -namespace -{ - -template <typename T_SAMPLE_PROCESSOR> -void equate(const Location &start, const Location &end, - const vector<vector<DPPP::DPBuffer> > &buffers, - const vector<MeasurementExpr::Ptr> &models, - const vector<casa::Array<casa::DComplex> > &coeff, - const vector<pair<size_t, size_t> > &baselineMap, - const vector<pair<size_t, size_t> > &correlationMap, - const vector<Interval<size_t> > (&cellMap)[2], - const map<PValueKey, unsigned int> &coeffMap, - vector<Cell> &cells) - -{ - ASSERT(buffers.size() >= models.size()); - - const size_t nDirections = buffers.size(); - const size_t nModels = models.size(); - - vector<JonesMatrix> sim(nModels); - vector<JonesMatrix> mixed(nDirections); - - typedef vector<pair<size_t, size_t> >::const_iterator bl_iterator; - for(bl_iterator it = baselineMap.begin(), it_end = baselineMap.end(); - it != it_end; ++it) - { - // Evaluate models. - for(size_t i = 0; i < nModels; ++i) - { - sim[i] = models[i]->evaluate(it->second); - } - - // Mix - Location visStart(cellMap[FREQ][start.first].start, - cellMap[TIME][start.second].start); - Location visEnd(cellMap[FREQ][end.first].end, - cellMap[TIME][end.second].end); - mix(sim, mixed, coeff, visStart, visEnd, it->first); - - // Flags will be equal for all mixed simulations. 
Skip baseline if all - // grid points are flagged. - if(sim.front().hasFlags()) - { - FlagArray flags(sim.front().flags()); - if(flags.rank() == 0 && (*flags.begin() != 0)) - { - continue; - } - } - - // Equate. - for(size_t i = 0; i < nDirections; ++i) - { - equate2<T_SAMPLE_PROCESSOR>(start, end, it->first, buffers[i], mixed[i], correlationMap, - cellMap, coeffMap, cells); - } - } // baselines -} - -template <typename T_SAMPLE_PROCESSOR> -void equate2(const Location &start, const Location &end, size_t blIndex, - const vector<DPPP::DPBuffer> &obs, const JonesMatrix &sim, - const vector<pair<size_t, size_t> > &correlationMap, - const vector<Interval<size_t> > (&cellMap)[2], - const map<PValueKey, unsigned int> &coeffMap, - vector<Cell> &cells) -{ -// // can we somehow properly clear() if nUnkowns does not change? -// // instead of using set() which reallocates? - - const unsigned int nFreq = cellMap[FREQ][end.first].end - - cellMap[FREQ][start.first].start + 1; - const unsigned int nTime = cellMap[TIME][end.second].end - - cellMap[TIME][start.second].start + 1; - - double *reSim = 0, *imSim = 0; - vector<unsigned int> coeffIndex(coeffMap.size()); - vector<double> reDerivative(coeffMap.size()); - vector<double> imDerivative(coeffMap.size()); - vector<double*> reSimDerivative(coeffMap.size(), 0); - vector<double*> imSimDerivative(coeffMap.size(), 0); - - typedef vector<pair<size_t, size_t> >::const_iterator cr_iterator; - for(cr_iterator cr_it = correlationMap.begin(), cr_end = correlationMap.end(); - cr_it != cr_end; ++cr_it) - { - const Element element = sim.element(cr_it->second); - if(element.size() <= 1) - { - continue; - } - - const size_t nCoeff = element.size() - 1; - - // ----------------------------------------------------------------- - // Setup pointers and strides to access the model value and - // derivatives. - // ----------------------------------------------------------------- - Matrix sim(element.value()); - ASSERT(sim.isComplex() && sim.isArray() - && static_cast<unsigned int>(sim.nx()) == nFreq - && static_cast<unsigned int>(sim.ny()) == nTime); - sim.dcomplexStorage(reSim, imSim); - - size_t i = 0; - for(Element::const_iterator el_it = element.begin(), - el_end = element.end(); el_it != el_end; ++el_it, ++i) - { - // Look-up coefficient index for this coefficient. - map<PValueKey, unsigned int>::const_iterator coeff_it = - coeffMap.find(el_it->first); - ASSERT(coeff_it != coeffMap.end()); - coeffIndex[i] = coeff_it->second; - - // Get pointers to the real and imaginary part of the partial - // derivarive of the model with respect to this coefficient. - Matrix derivative(el_it->second); - ASSERT(derivative.isComplex() && derivative.isArray() - && static_cast<unsigned int>(derivative.nx()) == nFreq - && static_cast<unsigned int>(derivative.ny()) == nTime); - derivative.dcomplexStorage(reSimDerivative[i], imSimDerivative[i]); - } - - size_t offset[2]; - offset[FREQ] = cellMap[FREQ][start.first].start; - offset[TIME] = cellMap[TIME][start.second].start; - - vector<Cell>::iterator cell = cells.begin(); - for(CellIterator it(start, end); !it.atEnd(); ++it, ++cell) - { - if(cell->solver.isReady()) - { - // Skip cell if it is inactive (converged or failed). 
- continue; - } - - const Interval<size_t> &freqInterval = cellMap[FREQ][it->first]; - const Interval<size_t> &timeInterval = cellMap[TIME][it->second]; - - size_t index = (timeInterval.start - offset[TIME]) * nFreq - + (freqInterval.start - offset[FREQ]); - - for(size_t t = timeInterval.start; t <= timeInterval.end; ++t) - { - const DPPP::DPBuffer &buffer = obs[t]; - - for(size_t f = freqInterval.start; f <= freqInterval.end; ++f) - { - const bool &flagged = buffer.getFlags()(cr_it->first, f, blIndex); - if(!flagged) - { - for(size_t i = 0; i < nCoeff; ++i) - { - reDerivative[i] = reSimDerivative[i][index]; - imDerivative[i] = imSimDerivative[i][index]; - } - - const fcomplex &vis = buffer.getData()(cr_it->first, f, blIndex); - const float &weight = buffer.getWeights()(cr_it->first, f, blIndex); - - T_SAMPLE_PROCESSOR::process(*cell, weight, real(vis), - imag(vis), reSim[index], imSim[index], nCoeff, - &(reDerivative[0]), &(imDerivative[0]), - &(coeffIndex[0])); - } - - ++index; - } - - index -= (freqInterval.end - freqInterval.start + 1); - index += nFreq; - } - } - } -} - -void subtract2(vector<DPPP::DPBuffer> &buffer, - const vector<MeasurementExpr::Ptr> &models, - const vector<casa::Array<casa::DComplex> > &coeff, - unsigned int target, - unsigned int nsources, - const vector<pair<size_t, size_t> > &baselineMap, - const vector<pair<size_t, size_t> > &correlationMap) -{ - vector<JonesMatrix> sim(nsources); - - typedef vector<pair<size_t, size_t> >::const_iterator - index_map_iterator; - - for(index_map_iterator bl_it = baselineMap.begin(), - bl_end = baselineMap.end(); bl_it != bl_end; ++bl_it) - { - // Evaluate models. - for(unsigned int i = 0; i < nsources; ++i) - { - sim[i] = models[i]->evaluate(bl_it->second); - } - - // Mix - JonesMatrix mixed = mix(sim, coeff, target, nsources, bl_it->first); - - // Subtract. - for(index_map_iterator cr_it = correlationMap.begin(), - cr_end = correlationMap.end(); cr_it != cr_end; ++cr_it) - { - Matrix crMixed = mixed.element(cr_it->second).value(); - ASSERT(!crMixed.isNull()); - - const unsigned int nFreq = crMixed.nx(); - const unsigned int nTime = crMixed.ny(); - ASSERT(crMixed.isComplex() && crMixed.isArray()); - ASSERTSTR(nTime == buffer.size(), "nTime: " << nTime << " buffer size: " << buffer.size()); - - const double *mixed_re = 0, *mixed_im = 0; - crMixed.dcomplexStorage(mixed_re, mixed_im); - - for(vector<DPBuffer>::iterator buffer_it = buffer.begin(), - buffer_end = buffer.end(); buffer_it != buffer_end; - ++buffer_it) - { - casa::Cube<casa::Complex> &data = buffer_it->getData(); - ASSERT(data.shape()(1) == int(nFreq)); - - for(size_t i = 0; i < nFreq; ++i) - { - data(cr_it->first, i, bl_it->first) -= - makedcomplex(*mixed_re++, *mixed_im++); - } // frequency - } // time - } // correlations - } // baselines -} - -JonesMatrix mix(const vector<JonesMatrix> &in, - const vector<casa::Array<casa::DComplex> > &coeff, - unsigned int target, - unsigned int nsources, - unsigned int baseline) -{ - const unsigned int nFreq = coeff.front().shape()(3); - const unsigned int nTime = coeff.size(); - - ASSERT(nFreq >= 1 && nTime >= 1); - const Location start(0, 0); - const Location end(nFreq - 1, nTime - 1); - - Matrix out[4]; - for(unsigned int i = 0; i < nsources; ++i) - { - for(unsigned int correlation = 0; correlation < 4; ++correlation) - { - // Exchanged target and i, because we want the effect of - // direction i on the target direction. 
- Matrix weight = makeMixingFactor(coeff, start, end, baseline, - correlation, target, i); - - const Matrix sim = in[i].element(correlation).value(); - - ASSERTSTR(sim.nx() == weight.nx(), "sim: " << sim.nx() << " weight: " << weight.nx()); - ASSERTSTR(sim.ny() == weight.ny(), "sim: " << sim.ny() << " weight: " << weight.ny()); - if(out[correlation].isNull()) - { - out[correlation] = weight * sim; - } - else - { - out[correlation] += weight * sim; - } - } // correlations - } // nsources - - JonesMatrix result(out[0], out[1], out[2], out[3]); - result.setFlags(mergeFlags(in)); - return result; -} - -void mix(const vector<JonesMatrix> &in, vector<JonesMatrix> &out, - const vector<casa::Array<casa::DComplex> > &coeff, - const Location &start, const Location &end, unsigned int baseline) -{ - // dims array: ndir x nmodel x ncorr x nchan x nbl (minor -> major). - // better dims: nbl x ncorr x ndir x nmodel x nchan ??? - - const unsigned int nModels = in.size(); - const unsigned int nDirections = coeff[0].shape()[0]; - ASSERT(nDirections == out.size()); - ASSERT(int(nModels) == coeff[0].shape()[1]); - - FlagArray flags = mergeFlags(in); - - for(unsigned int i = 0; i < nDirections; ++i) - { - Element element[4]; - for(unsigned int j = 0; j < nModels; ++j) - { - for(unsigned int k = 0; k < 4; ++k) - { - Matrix factor = makeMixingFactor(coeff, start, end, - baseline, k, i, j); - - mixAndMerge(factor, in[j].element(k), element[k]); - } - } - - out[i] = JonesMatrix(element[0], element[1], element[2], - element[3]); - out[i].setFlags(flags); - } -} - -FlagArray mergeFlags(const vector<JonesMatrix> &in) -{ - vector<JonesMatrix>::const_iterator first = in.begin(); - vector<JonesMatrix>::const_iterator last = in.end(); - - for(; first != last && !first->hasFlags(); ++first) - { - } - - if(first == last) - { - return FlagArray(); - } - - FlagArray flags = first->flags().clone(); - ++first; - - for(; first != last; ++first) - { - if(first->hasFlags()) - { - flags |= first->flags(); - } - } - - return flags; -} - -void mixAndMerge(const Matrix &factor, const Element &in, Element &out) -{ - // Update value. - Matrix value = out.value(); - if(value.isNull()) - { - out.assign(factor * in.value()); - } - else - { - value += factor * in.value(); - } - - // Update partial derivatives. - Element::const_iterator inIter = in.begin(); - Element::const_iterator inEnd = in.end(); - - Element::iterator outIter = out.begin(); - Element::iterator outEnd = out.end(); - - while(inIter != inEnd && outIter != outEnd) - { - if(outIter->first == inIter->first) - { - outIter->second += factor * inIter->second; - ++inIter; - ++outIter; - } - else if(outIter->first < inIter->first) - { - ++outIter; - } - else - { - out.assign(inIter->first, factor * inIter->second); - ++inIter; - } - } - - while(inIter != inEnd) - { - out.assign(inIter->first, factor * inIter->second); - ++inIter; - } -} - -Matrix makeMixingFactor(const vector<casa::Array<casa::DComplex> > &coeff, - const Location &start, const Location &end, unsigned int baseline, - unsigned int correlation, unsigned int row, unsigned int column) -{ - const unsigned int nFreq = end.first - start.first + 1; - const unsigned int nTime = end.second - start.second + 1; - - Matrix factor(makedcomplex(0.0, 0.0), nFreq, nTime, false); - double *re = 0, *im = 0; - factor.dcomplexStorage(re, im); - - // dims coeff: ndir x nmodel x ncorr x nchan x nbl (minor -> major). - // nmodel = nr of directions with source model (thus excl. 
target) - casa::IPosition index(5, row, column, correlation, 0, baseline); - for(unsigned int t = start.second; t <= end.second; ++t) - { - const casa::Array<casa::DComplex> &tmp = coeff[t]; - ASSERTSTR(tmp.shape()(3) == int(nFreq), "nFreq: " - << nFreq << ' ' << tmp.shape()); - for(index(3) = start.first; index(3) <= static_cast<int>(end.first); - ++index(3)) - { - const casa::DComplex &weight = tmp(index); - *re++ = real(weight); - *im++ = imag(weight); - } - } - - return factor; -} - -template <typename T_SAMPLE_MODIFIER> -struct SampleProcessorComplex -{ - static inline void process(Cell &cell, double weight, double reObs, - double imObs, double reSim, double imSim, unsigned int nDerivative, - double *reDerivative, double *imDerivative, - const unsigned int *index) - { - // Modify the observed and simulated data depending on the solving - // mode (complex, phase only, amplitude only). - T_SAMPLE_MODIFIER::process(weight, reObs, imObs, reSim, imSim, - reDerivative, imDerivative, nDerivative); - - if(weight == 0.0) - { - return; - } - - // Compute the residual. - double reResidual = reObs - reSim; - double imResidual = imObs - imSim; - - // Update the normal equations. - cell.solver.makeNorm(nDerivative, index, reDerivative, weight, - reResidual); - cell.solver.makeNorm(nDerivative, index, imDerivative, weight, - imResidual); - } -}; - -void initCells(const Location &start, const Location &end, - const ParmGroup &solvables, size_t nCoeff, - const EstimateOptions &options, - vector<Cell> &cells) -{ - vector<Cell>::iterator cell = cells.begin(); - for(CellIterator it(start, end); !it.atEnd(); ++it, ++cell) - { - // Initalize LSQ solver. - cell->solver = casa::LSQFit(static_cast<casa::uInt>(nCoeff)); - configLSQSolver(cell->solver, options.lsqOptions()); - - // Initialize coefficients. - cell->coeff.resize(nCoeff); - loadCoeff(*it, solvables, cell->coeff.begin()); - } -} - -bool iterate(const Location &start, const Location &end, - const ParmGroup &solvables, const EstimateOptions &options, - vector<Cell> &cells) -{ - const size_t nCellFreq = end.first - start.first + 1; - const size_t nCellTime = end.second - start.second + 1; - const size_t nCell = nCellFreq * nCellTime; - - bool done = true; -#pragma omp parallel for - for(size_t i = 0; i < nCell; ++i) - { - Cell &cell = cells[i]; - - // If processing on the cell is already done, only update the status - // counts and continue to the next cell. - if(cell.solver.isReady()) - { - continue; - } - - // Perform a single iteration if the cell has not yet converged or - // failed. - // - // LSQFit::solveLoop() only returns false if the normal - // equations are singular. This can also be seen from the result - // of LSQFit::isReady(), so we don't update the iteration status - // here but do skip the update of the solvables. - casa::uInt rank; - if(cell.solver.solveLoop(rank, &(cell.coeff[0]), - options.lsqOptions().useSVD)) - { - // Store the updated coefficient values. 
- storeCoeff(Location(start.first + i % nCellFreq, start.second + i - / nCellFreq), solvables, cell.coeff.begin()); - } - - if(!cell.solver.isReady()) - { - done = false; - } - } - - return done; -} - -} // unnamed namespace - -} //# namespace BBS -} //# namespace LOFAR diff --git a/CEP/DP3/DPPP/src/Filter.cc b/CEP/DP3/DPPP/src/Filter.cc index 9ab9a6274772e99ddc9750aea4493f02ffdc8812..9b4bb455897a5cd11f483f02ef27d8ac38481ae4 100644 --- a/CEP/DP3/DPPP/src/Filter.cc +++ b/CEP/DP3/DPPP/src/Filter.cc @@ -134,8 +134,8 @@ namespace LOFAR { itsTimer.start(); if (!itsDoSelect) { itsBuf = buf; // uses reference semantics - itsTimer.stop(); - getNextStep()->process (itsBuf); + itsTimer.stop(); + getNextStep()->process (itsBuf); return true; } // Make sure no other object references the DATA and UVW arrays. diff --git a/CEP/Imager/LofarFT/src/fillRootImageGroup.cc b/CEP/Imager/LofarFT/src/fillRootImageGroup.cc index 7e34089181612bcf1039d61d264e14b083537331..93893cc485626d3b88e4bda17879336dda0b21ce 100644 --- a/CEP/Imager/LofarFT/src/fillRootImageGroup.cc +++ b/CEP/Imager/LofarFT/src/fillRootImageGroup.cc @@ -25,6 +25,8 @@ //# //# $Id$ +#include <lofar_config.h> + #include <casa/Containers/Record.h> #include <casa/HDF5/HDF5File.h> #include <casa/HDF5/HDF5Group.h> diff --git a/CEP/LMWCommon/src/cexecms b/CEP/LMWCommon/src/cexecms index b6be7ec9103122108afec22094a4f6a9e111bb72..a1419b49d91f90d25978223398c05e523c14b13d 100755 --- a/CEP/LMWCommon/src/cexecms +++ b/CEP/LMWCommon/src/cexecms @@ -8,15 +8,16 @@ showhelp() { echo '' echo ' cexecms runs a command or script on cluster nodes for files matching the' - echo ' given file name glob pattern. Placeholders in command or script are' - echo ' replaced by the actual file name.' + echo ' given file name glob pattern. Placeholders in the command or script are' + echo ' replaced by the actual file name parts.' echo '' echo ' usage:' echo ' cexecms [-c cluster] [-d] [-s script] [-w workdir] command nameglob [arg1 arg2 ...]' echo '' echo ' -c cluster Cluster name as defined for cexec.' - echo ' default is lce: if run on an lfe node,' - echo ' test: if run on lce072, otherwise locus:' + echo ' default is lce: if run on an lfe node' + echo ' test: if run on lce072' + echo ' locus: otherwise' echo ' -d Do a dryrun.' echo ' (do not execute, but only print the command/script)' echo ' -i ids List of ids to replace <ID> in the nameglob argument.' @@ -31,9 +32,12 @@ showhelp() echo ' command Command to be executed remotely.' echo ' Quotes are needed if it contains spaces, etc.' echo ' Placeholders (like <FN>) in the command are replaced.' - echo ' nameglob File name glob pattern (# is a shorthand for [0-9]).' + echo ' nameglob File name glob pattern to find matching files' + echo ' # can be used as a shorthand for [0-9].' echo ' E.g., one can use SB### meaning any subband.' - echo ' arg1 arg2 .. Optional extra arguments to be given to command.' + echo ' The pattern can contain the placeholder <ID> as explained' + echo ' above in the -i option.' + echo ' arg1 arg2 .. Optional extra arguments to be given to the command.' echo '' echo ' Using cexec, the script cexecms-part is executed on the given cluster' echo ' nodes. It looks for files matching the given file name glob pattern.' @@ -46,6 +50,12 @@ showhelp() echo ' <DIRNAME> or <DN> for the directory part' echo ' The first two can be followed by a . (e.g. 
<FN.>) meaning that the' echo ' basename is used till the first dot (thus the extension is removed.)' + echo ' Similarly, <.BN> gives the extension (thus after the first dot).' + echo ' For standard LOFAR file names the following placeholders can also be used:' + echo ' <OBSID> for the obsid part of <BN.> (till first _)' + echo ' <SAP> for the subarray pointing part of <BN.> (till next _)' + echo ' <SB> for the subband part of <BN.> (till next _)' + echo ' <TYPE> for the dataset type part of <BN.> (after last _)' echo '' echo ' If -s is given, the command is executed like:' echo ' command script arg1 arg2 ..' @@ -68,7 +78,7 @@ showhelp() echo ' Also note that (t)csh requires a ! to be escaped with a backslash.' echo '' echo ' Sometimes a command can be dangerous or take a long time to run.' - echo ' In such a case it makes sense to execute it first with the -d option.' + echo ' In such a case it makes sense to first do a dry-run execution with the -d option.' echo '' echo ' Note that the current environment (paths, etc.) is copied. You should' echo ' have done "use LofIm" if you need LofIm in the (remote) command.' diff --git a/CEP/LMWCommon/src/cexecms-part b/CEP/LMWCommon/src/cexecms-part index 48b7e9268cfd96e88e62336177175f971ac94d36..4f68ccce6a9531689e2c78e024ae273083a20368 100755 --- a/CEP/LMWCommon/src/cexecms-part +++ b/CEP/LMWCommon/src/cexecms-part @@ -11,6 +11,10 @@ # <BASENAME> or <BN> by the basename part # <DIRNAME> or <DN> by the directory part # The first two can be followed by a . meaning that the extension is ignored. +# <OBSID> by the obsid part of <BN.> (till first _) +# <SAP> by the subarray pointing part of <BN.> (till next _) +# <SB> by the subband part of <BN.> (till next _) +# <TYPE> by the dataset type part of <BN.> (after last _) # If no substitutions are done, the command is executed like # command filename arg1 arg2 ... # otherwise like @@ -125,10 +129,15 @@ command=`echo "$command" | sed \ for fname in $names do # Form the various placeholder replacements. - dname=`dirname $fname` # directory - bname=`basename $fname` # basename - bnamed=`echo $bname | sed -e 's%\..*%%'` # basename without extension - fnamed=$dname/$bnamed # full name without extension + dname=`dirname $fname` # directory + bname=`basename $fname` # basename + bnamed=`echo $bname | sed -e 's%\..*%%'` # basename without extension + fnamed=$dname/$bnamed # full name without extension + ename=`echo $bname | sed -e "s%$bnamed\.%%"` # extension + obsid=`echo $bnamed | awk -F_ '{print $1}'` + sap=`echo $bnamed | awk -F_ '{print $2}'` + sb=`echo $bnamed | awk -F_ '{print $3}'` + type=`echo $bnamed | awk -F_ '{print $4}'` # Replace the placeholders. commandnew=`echo "$command" | sed \ -e "s%<FN>%$fname%g" \ @@ -137,12 +146,18 @@ do -e "s%<FN\.>%$fnamed%g" \ -e "s%<DN\.>%$dnamed%g" \ -e "s%<BN\.>%$bnamed%g" \ + -e "s%<\.BN>%$ename%g" \ -e "s%<FILENAME>%$fname%g" \ -e "s%<DIRNAME>%$dname%g" \ -e "s%<BASENAME>%$bname%g" \ -e "s%<FILENAME\.>%$fnamed%g" \ -e "s%<DIRNAME\.>%$dnamed%g" \ - -e "s%<BASENAME\.>%$bnamed%g"` + -e "s%<BASENAME\.>%$bnamed%g" \ + -e "s%<\.BASENAME>%$ename%g" \ + -e "s%<OBSID>%$obsid%g" \ + -e "s%<SAP>%$sap%g" \ + -e "s%<SB>%$sb%g" \ + -e "s%<TYPE>%$type%g"` if test "$script" != ""; then # A script is given, replace placeholders in there as well. 
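For reference, the new <OBSID>, <SAP>, <SB> and <TYPE> placeholders are simply the underscore-separated fields of the dot-stripped basename (the awk -F_ lines above). An illustration for a hypothetical LOFAR-style file name, shown in Python for brevity while the script itself uses sed/awk:

    bname  = "L123456_SAP000_SB123_uv.MS"       # hypothetical measurement set name
    bnamed = bname.split(".", 1)[0]             # <BN.>  -> "L123456_SAP000_SB123_uv"
    ename  = bname.split(".", 1)[1]             # <.BN>  -> "MS"
    obsid, sap, sb, dstype = bnamed.split("_")  # <OBSID>, <SAP>, <SB>, <TYPE>
    # obsid == "L123456", sap == "SAP000", sb == "SB123", dstype == "uv"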
psname=$HOME/`basename $script`-$USER-$$ @@ -153,12 +168,18 @@ do -e "s%<FN\.>%$fnamed%g" \ -e "s%<DN\.>%$dnamed%g" \ -e "s%<BN\.>%$bnamed%g" \ + -e "s%<\.BN>%$ename%g" \ -e "s%<FILENAME>%$fname%g" \ -e "s%<DIRNAME>%$dname%g" \ -e "s%<BASENAME>%$bname%g" \ -e "s%<FILENAME\.>%$fnamed%g" \ -e "s%<DIRNAME\.>%$dnamed%g" \ -e "s%<BASENAME\.>%$bnamed%g" \ + -e "s%<\.BASENAME>%$ename%g" \ + -e "s%<OBSID>%$obsid%g" \ + -e "s%<SAP>%$sap%g" \ + -e "s%<SB>%$sb%g" \ + -e "s%<TYPE>%$type%g" \ $script > $psname if test $dryrun = 1; then echo "Dryrun: " $commandnew $psname "$@" diff --git a/CEP/PyBDSM/doc/source/conf.py b/CEP/PyBDSM/doc/source/conf.py index 73644164ae4ff956cf20b8936a99199dfd767cca..16ba5fc5e1ff9e0d96d36887fd0efd2821c2dc3e 100644 --- a/CEP/PyBDSM/doc/source/conf.py +++ b/CEP/PyBDSM/doc/source/conf.py @@ -50,7 +50,7 @@ copyright = u'2012, David Rafferty and Niruj Mohan' # The short X.Y version. version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.4.2' +release = '1.4.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/CEP/PyBDSM/doc/source/export_image.rst b/CEP/PyBDSM/doc/source/export_image.rst index 9ac02573329e382046b43a04c05a616d4f551450..ac353ad8406256f3cb9c77104dd931c9de295dca 100644 --- a/CEP/PyBDSM/doc/source/export_image.rst +++ b/CEP/PyBDSM/doc/source/export_image.rst @@ -18,7 +18,8 @@ Internally derived images (e.g, the Gaussian model image) can be exported to FIT moment only 'fits' is supported) :term:`img_type` ....... 'gaus_resid': Type of image to export: 'gaus_resid', 'shap_resid', 'rms', 'mean', 'gaus_model', - 'shap_model', 'ch0', 'pi' + 'shap_model', 'ch0', 'pi', 'psf_major', 'psf_minor', + 'psf_pa', 'psf_ratio', 'psf_ratio_aper' Each of the parameters is described in detail below. @@ -53,3 +54,13 @@ Each of the parameters is described in detail below. * ``'shap_model'`` - Shapelet model image + * ``'psf_major'`` - image of major axis FWHM variation (arcsec) + + * ``'psf_minor'`` - image of minor axis FWHM variation (arcsec) + + * ``'psf_pa'`` - image of position angle variation (degrees east of north) + + * ``'psf_ratio'`` - image of peak-to-total flux variation (1/beam) + + * ``'psf_ratio_aper'`` - image of peak-to-aperture flux variation (1/beam) + diff --git a/CEP/PyBDSM/doc/source/process_image.rst b/CEP/PyBDSM/doc/source/process_image.rst index 1fe0b26dcc080c510a2c560e563b0a6d84180732..d7c365b9151d6adf9abb6ee7c6803d26d116ea14 100644 --- a/CEP/PyBDSM/doc/source/process_image.rst +++ b/CEP/PyBDSM/doc/source/process_image.rst @@ -850,7 +850,7 @@ If ``psf_vary_do = True``, then the spatial variations in the PSF are estimated * The unresolved sources within each tile that have SNRs greater than ``psf_snrcutstack`` are then stacked to form a high-SNR PSF. For each tile, this PSF is fit with a Gaussian to recover its size. The significance of the variation in the sizes across the image is quantified. -* If the variation is significant, the major axis, minor axis, and position angle are then interpolated across the image. Where there is sufficient information, the interpolation is done using Delaunay triangulation; otherwise, the values within the tiles defined by tessellation are simply set to those of the appropriate PSF. +* If the variation is significant, the major axis, minor axis, and position angle are then interpolated across the image. Smoothing can be applied to these images to smooth out artifacts due to noise and the interpolation. 
Additionally, images are made of the ratio of peak-to-total flux and peak-to-aperture flux (if an aperture is specified). These ratio images provide conversions from total flux to peak flux for point sources. In the absence of smearing effects, these ratios should be around unity. However, if ionospheric effects are present, significant smearing can be present. In this case, these ratio images can be useful, for example, in determining the sensitivity at a particular location in the image to a point source with a given total flux. * Lastly, the deconvolved source sizes are adjusted to include the PSF variation as a function of position. @@ -868,6 +868,8 @@ The options for this module are as follows: :term:`psf_nsig` ............. 3.0 : Kappa for clipping within each bin :term:`psf_over` ............... 2 : Factor of nyquist sample for binning bmaj, etc. vs SNR + :term:`psf_smooth` .......... None : Size of Gaussian to use for smoothing of + interpolated images in arcsec. None => no smoothing :term:`psf_snrcut` .......... 10.0 : Minimum SNR for statistics :term:`psf_snrcutstack` ..... 15.0 : Unresolved sources with higher SNR taken for stacked psfs @@ -906,6 +908,9 @@ The options for this module are as follows: psf_over This parameter is an integer (default is 2). When constructing a set of 'unresolved' sources for psf estimation, this parameter controls the factor of nyquist sample for binning bmaj, etc. vs SNR. + psf_smooth + This parameter is a float (default is ``None``) that sets the smoothing scale used to smooth the interpolated images. Generally, artifacts due to noise and the interpolation can be significantly reduced if the smoothing scale is similar to the typical source separation scale. + psf_snrcut This parameter is a float (default is 10.0). Only Gaussians with SNR greater than this are considered for processing. The minimum value is 5.0 diff --git a/CEP/PyBDSM/doc/source/whats_new.rst b/CEP/PyBDSM/doc/source/whats_new.rst index c9f2272b06ea838391abaf14a452c7ddbe889441..08df13c8aa4bfc116f3c30da44d369ac5c2caf72 100644 --- a/CEP/PyBDSM/doc/source/whats_new.rst +++ b/CEP/PyBDSM/doc/source/whats_new.rst @@ -4,6 +4,12 @@ What's New ********** +Version 1.4.3 (2012/10/04): + + * Fixed a bug in the mean map calculation that caused mean maps with constant values (i.e., non-2D maps) to have values of 0.0 Jy/beam unless "mean_map = 'const'" was explicitly specified. + + * Fixed a bug in the PSF vary module that resulted in incorrect PSF generators being used. Added an option to smooth the resulting PSF images ("psf_smooth"). Parallelized the PSF interpolation and smoothing steps. Improved PSF vary documentation. + Version 1.4.2 (2012/09/25): * Dramatically reduced time required to identify valid wavelet islands. Fixed bug that resulted in output FITS gaul tables being improperly sorted. diff --git a/CEP/PyBDSM/src/python/_version.py b/CEP/PyBDSM/src/python/_version.py index 878764a422b963d9bd7888b88e28a492376fd8ca..571659cfd7444d1748c3dd0e2c741affb55d5a26 100644 --- a/CEP/PyBDSM/src/python/_version.py +++ b/CEP/PyBDSM/src/python/_version.py @@ -9,7 +9,7 @@ adding to the changelog will naturally do this. """ # Version number -__version__ = '1.4.2' +__version__ = '1.4.3' # Store svn Revision number. For this to work, one also needs to do: # @@ -27,6 +27,19 @@ def changelog(): PyBDSM Changelog. 
----------------------------------------------------------------------- + 2012/10/04 - Version 1.4.3 + + 2012/10/04 - Fixed a bug in the mean map calculation that caused mean + maps with constant values (i.e., non-2D maps) to have values of + 0.0 Jy/beam unless "mean_map = 'const'" was explicitly specified. + Fixed a bug in Gaussian fitting that could cause an island to be + skipped. + + 2012/10/02 - Fixed a bug in the PSF vary module that resulted in + incorrect PSF generators being used. Added an option to smooth + the resulting PSF images ("psf_smooth"). Parallelized the PSF + interpolation and smoothing steps. Improved PSF vary documentation. + 2012/09/25 - Version 1.4.2 2012/09/25 - Dramatically reduced the time required to identify valid diff --git a/CEP/PyBDSM/src/python/gausfit.py b/CEP/PyBDSM/src/python/gausfit.py index 5943ee62af9b1382e9ca9c2ce1077f9b97fba6f4..930b8e6026815eab877dd2fe89e8a41aaeb4de62 100644 --- a/CEP/PyBDSM/src/python/gausfit.py +++ b/CEP/PyBDSM/src/python/gausfit.py @@ -240,6 +240,7 @@ class Op_gausfit(Op): """ from _cbdsm import MGFunction + if ffimg == None: fcn = MGFunction(isl.image-isl.islmean, isl.mask_active, 1) else: @@ -300,13 +301,14 @@ class Op_gausfit(Op): break if not fitok: # If normal fitting fails, try to fit 5 or fewer Gaussians to the island - ngmax = 5 + ngmax = 6 while not fitok and ngmax > 1: - fitok = self.fit_iter([], 0, fcn, dof, beam, thr0, 1, 'simple', ngmax, verbose) ngmax -= 1 + fitok = self.fit_iter([], 0, fcn, dof, beam, thr0, 1, 'simple', ngmax, verbose) gaul, fgaul = self.flag_gaussians(fcn.parameters, opts, beam, thr0, peak, shape, isl.mask_active, isl.image, size) + sm_isl = nd.binary_dilation(isl.mask_active) if not fitok and N.sum(~sm_isl) >= img.minpix_isl: # If all else fails, shrink the island a little and try one last time @@ -348,7 +350,6 @@ class Op_gausfit(Op): if fitok and len(fgaul) == 0: break - ### return whatever we got isl.mg_fcn = fcn gaul = [self.fixup_gaussian(isl, g) for g in gaul] @@ -807,7 +808,6 @@ class Op_gausfit(Op): elif mask[tuple(pt)]: flag += 256 break - return flag def fixup_gaussian(self, isl, gaussian): diff --git a/CEP/PyBDSM/src/python/interface.py b/CEP/PyBDSM/src/python/interface.py index e1c487d2f4d78158a90e97ed9988ed90e35e0270..8b08e5a2d258e0a990bb2f1999706e3dd3cee9c5 100644 --- a/CEP/PyBDSM/src/python/interface.py +++ b/CEP/PyBDSM/src/python/interface.py @@ -545,9 +545,11 @@ def export_image(img, outfile=None, img_format='fits', 'gaus_model' - Gaussian model image 'shap_resid' - Shapelet model residual image 'shap_model' - Shapelet model image - 'psf_major' - PSF major axis FWHM image - 'psf_minor' - PSF minor axis FWHM image - 'psf_pa' - PSF position angle image + 'psf_major' - PSF major axis FWHM image (FWHM in arcsec) + 'psf_minor' - PSF minor axis FWHM image (FWHM in arcsec) + 'psf_pa' - PSF position angle image (degrees east of north) + 'psf_ratio' - PSF peak-to-total flux ratio (in units of 1/beam) + 'psf_ratio_aper' - PSF peak-to-aperture flux ratio (in units of 1/beam) """ import os import functions as func @@ -625,6 +627,14 @@ def export_image(img, outfile=None, img_format='fits', func.write_image_to_file(use_io, filename, img.psf_vary_pa, img, bdir, clobber=clobber) + elif img_type == 'psf_ratio': + func.write_image_to_file(use_io, filename, + img.psf_vary_ratio, img, bdir, + clobber=clobber) + elif img_type == 'psf_ratio_aper': + func.write_image_to_file(use_io, filename, + img.psf_vary_ratio_aper, img, bdir, + clobber=clobber) elif img_type == 'gaus_resid': im = img.resid_gaus 
func.write_image_to_file(use_io, filename, diff --git a/CEP/PyBDSM/src/python/multi_proc.py b/CEP/PyBDSM/src/python/multi_proc.py index bba5c1f8cf5affa896efcd0cfc8111b126d122e7..2e058f27932f81b7dc7fbf71a2b259c15291186d 100644 --- a/CEP/PyBDSM/src/python/multi_proc.py +++ b/CEP/PyBDSM/src/python/multi_proc.py @@ -113,7 +113,6 @@ def run_tasks(procs, err_q, out_q, num): return numpy.concatenate(results).tolist() - def parallel_map(function, sequence, numcores=None, bar=None, weights=None): """ A parallelized version of the native Python map function that @@ -189,7 +188,7 @@ def parallel_map(function, sequence, numcores=None, bar=None, weights=None): for indx, weight in enumerate(weights): temp_sum += weight if temp_sum > weight_per_core: - cut_values.append(indx) + cut_values.append(indx+1) temp_sum = weight if len(cut_values) > numcores - 1: cut_values = cut_values[0:numcores-1] diff --git a/CEP/PyBDSM/src/python/opts.py b/CEP/PyBDSM/src/python/opts.py index 5482ed6831678022897d374cc6dd0aa692ea040c..50dd1f59e196d21731b2fde2ddea897a9a0d4851 100644 --- a/CEP/PyBDSM/src/python/opts.py +++ b/CEP/PyBDSM/src/python/opts.py @@ -879,6 +879,11 @@ class Opts(object): "considered 'unresolved' and are used further to "\ "estimate the PSFs.", group = "psf_vary_do") + psf_smooth = Option(None, Float(), + doc = "Size of Gaussian to use for smoothing of "\ + "interpolated images in arcsec. None => no "\ + "smoothing", + group = "psf_vary_do") psf_snrcut = Float(10.0, doc = "Minimum SNR for statistics\n"\ "Only Gaussians with SNR greater than this are "\ @@ -1093,7 +1098,7 @@ class Opts(object): group = 'hidden') img_type = Enum('gaus_resid', 'shap_resid', 'rms', 'mean', 'gaus_model', 'shap_model', 'ch0', 'pi', 'psf_major', 'psf_minor', - 'psf_pa', + 'psf_pa', 'psf_ratio', 'psf_ratio_aper', doc = "Type of image to export: 'gaus_resid', "\ "'shap_resid', 'rms', 'mean', 'gaus_model', "\ "'shap_model', 'ch0', 'pi', 'psf_major', "\ @@ -1107,9 +1112,11 @@ class Opts(object): "'gaus_model' - Gaussian model image\n"\ "'shap_resid' - Shapelet model residual image\n"\ "'shap_model' - Shapelet model image\n"\ - "'psf_major' - PSF major axis FWHM (in pixels) image\n"\ - "'psf_minor' - PSF minor axis FWHM (in pixels) image\n"\ - "'psf_pa' - PSF position angle (E from N in degrees) image\n", + "'psf_major' - PSF major axis FWHM image (FWHM in arcsec)\n"\ + "'psf_minor' - PSF minor axis FWHM image (FWHM in arcsec)\n"\ + "'psf_pa' - PSF position angle image (degrees east of north)\n"\ + "'psf_ratio' - PSF peak-to-total flux ratio (in units of 1/beam)\n"\ + "'psf_ratio_aper' - PSF peak-to-aperture flux ratio (in units of 1/beam)", group = 'hidden') ch0_image = Bool(True, doc = "Show the ch0 image. 
This is the image used for "\ @@ -1154,11 +1161,11 @@ class Opts(object): group = "hidden") psf_major = Bool(False, doc = "Show the PSF major axis variation (values are "\ - "FWHM in pixels)", + "FWHM in arcsec)", group = "hidden") psf_minor = Bool(False, doc = "Show the FWHM of PSF minor axis variation (values are "\ - "FWHM in pixels)", + "FWHM in arcsec)", group = "hidden") psf_pa = Bool(False, doc = "Show the PSF position angle variation (values are "\ diff --git a/CEP/PyBDSM/src/python/plotresults.py b/CEP/PyBDSM/src/python/plotresults.py index 3070e03434375c68ef38678ab835fa9482a95b71..cf9db04e38704516e346628013086c374d516958 100644 --- a/CEP/PyBDSM/src/python/plotresults.py +++ b/CEP/PyBDSM/src/python/plotresults.py @@ -667,14 +667,14 @@ def format_coord_psf_maj(x, y): """Custom coordinate format for PSF major image""" global img_psf_maj im = img_psf_maj - coord_str = make_coord_str(x, y, im, unit='pixels') + coord_str = make_coord_str(x, y, im, unit='arcsec') return coord_str def format_coord_psf_min(x, y): """Custom coordinate format for PSF minor image""" global img_psf_min im = img_psf_min - coord_str = make_coord_str(x, y, im, unit='pixels') + coord_str = make_coord_str(x, y, im, unit='arcsec') return coord_str def format_coord_psf_pa(x, y): diff --git a/CEP/PyBDSM/src/python/psf_vary.py b/CEP/PyBDSM/src/python/psf_vary.py index ed4f7ea8958e0120e6998d9a2db237f34ef4f64f..02d1971d6bc1bf370e0174974e9d810c64f04307 100644 --- a/CEP/PyBDSM/src/python/psf_vary.py +++ b/CEP/PyBDSM/src/python/psf_vary.py @@ -19,6 +19,9 @@ import nat from math import * import statusbar from const import fwsig +import multi_proc as mp +import itertools + class Op_psf_vary(Op): """Computes variation of psf across the image """ @@ -128,7 +131,6 @@ class Op_psf_vary(Op): tile_prop = self.edit_vorogenlist(vorogenP, frac=0.9) # tesselate the image - #volrank, volrank_tilenum, wts = tesselate(vorogenP, vorogenS, tile_prop, tess_method, tess_sc, tess_fuzzy, \ volrank, vorowts = self.tesselate(vorogenP, vorogenS, tile_prop, tess_method, tess_sc, tess_fuzzy, \ generators, gencode, image.shape) if opts.output_all: @@ -230,35 +232,79 @@ class Op_psf_vary(Op): bar.stop() # Interpolate Gaussian parameters - psf_maj_int = self.interp_prop(psf_maj, psfcoords, image.shape) - psf_min_int = self.interp_prop(psf_min, psfcoords, image.shape) - psf_pa_int = self.interp_prop(psf_pa, psfcoords, image.shape) - psf_ratio_int = self.interp_prop(psfratio, psfcoords, image.shape) - psf_ratio_aper_int = self.interp_prop(psfratio_aper, psfcoords, image.shape) + if img.aperture == None: + psf_maps = [psf_maj, psf_min, psf_pa, psfratio] + else: + psf_maps = [psf_maj, psf_min, psf_pa, psfratio, psfratio_aper] + nimgs = len(psf_maps) + bar = statusbar.StatusBar('Interpolating PSF images ................ 
: ', 0, nimgs) + if img.opts.quiet == False: + bar.start() + map_list = mp.parallel_map(func.eval_func_tuple, + itertools.izip(itertools.repeat(self.interp_prop), + psf_maps, itertools.repeat(psfcoords), + itertools.repeat(image.shape)), numcores=opts.ncores, + bar=bar) + if img.aperture == None: + psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int = map_list + else: + psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int = map_list + + # Smooth if desired + if img.opts.psf_smooth != None: + sm_scale = img.opts.psf_smooth / img.pix2beam([1.0, 1.0, 0.0])[0] / 3600.0 # pixels + if img.opts.aperture == None: + psf_maps = [psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int] + else: + psf_maps = [psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int] + nimgs = len(psf_maps) + bar = statusbar.StatusBar('Smoothing PSF images .................... : ', 0, nimgs) + if img.opts.quiet == False: + bar.start() + map_list = mp.parallel_map(func.eval_func_tuple, + itertools.izip(itertools.repeat(self.blur_image), + psf_maps, itertools.repeat(sm_scale)), numcores=opts.ncores, + bar=bar) + if img.aperture == None: + psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int = map_list + else: + psf_maj_int, psf_min_int, psf_pa_int, psf_ratio_int, psf_ratio_aper_int = map_list + + # Make sure all smoothed, interpolated images are ndarrays + psf_maj_int = N.array(psf_maj_int) + psf_min_int = N.array(psf_min_int) + psf_pa_int = N.array(psf_pa_int) + psf_ratio_int = N.array(psf_ratio_int) + if img.aperture == None: + psf_ratio_aper_int = N.zeros(psf_maj_int.shape) + else: + psf_ratio_aper_int = N.array(psf_ratio_aper_int) + + # Store interpolated images. The major and minor axis images are + # the sigma in units of arcsec, the PA image in units of degrees east of + # north, the ratio images in units of 1/beam. 
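In the smoothing step above, ``psf_smooth`` is given in arcsec and converted to pixels (via ``pix2beam``) before a Gaussian filter is applied to each interpolated map. A self-contained sketch of that conversion, assuming a hypothetical pixel scale of 1.5 arcsec per pixel and a toy map:

    import numpy as np
    from scipy.ndimage import gaussian_filter

    pix_scale = 1.5                      # arcsec per pixel (assumed value)
    psf_smooth = 30.0                    # smoothing scale in arcsec
    sm_scale = psf_smooth / pix_scale    # smoothing scale in pixels

    psf_maj_int = np.random.rand(128, 128)               # toy interpolated PSF map
    psf_maj_sm = gaussian_filter(psf_maj_int, sm_scale)  # smoothed map, same shape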
+ img.psf_vary_maj = psf_maj_int * img.pix2beam([1.0, 1.0, 0.0])[0] * 3600.0 # sigma in arcsec + img.psf_vary_min = psf_min_int * img.pix2beam([1.0, 1.0, 0.0])[0] * 3600.0 # sigma in arcsec + img.psf_vary_pa = psf_pa_int + img.psf_vary_ratio = psf_ratio_int # in 1/beam + img.psf_vary_ratio_aper = psf_ratio_aper_int # in 1/beam # Blank with NaNs if needed mask = img.mask if isinstance(mask, N.ndarray): pix_masked = N.where(mask == True) - psf_maj_int[pix_masked] = N.nan - psf_min_int[pix_masked] = N.nan - psf_pa_int[pix_masked] = N.nan - psf_ratio_int[pix_masked] = N.nan - psf_ratio_aper_int[pix_masked] = N.nan - - # Store interpolated images - img.psf_vary_maj = psf_maj_int - img.psf_vary_min = psf_min_int - img.psf_vary_pa = psf_pa_int - img.psf_vary_ratio = psf_ratio_int - img.psf_vary_ratio_aper = psf_ratio_aper_int + img.psf_vary_maj[pix_masked] = N.nan + img.psf_vary_min[pix_masked] = N.nan + img.psf_vary_pa[pix_masked] = N.nan + img.psf_vary_ratio[pix_masked] = N.nan + img.psf_vary_ratio_aper[pix_masked] = N.nan if opts.output_all: - func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_maj.fits', psf_maj_int*fwsig, img, dir) - func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_min.fits', psf_min_int*fwsig, img, dir) - func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_pa.fits', psf_pa_int, img, dir) - func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_ratio.fits', psf_ratio_int, img, dir) - func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_ratio_aper.fits', psf_ratio_aper_int, img, dir) + func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_maj.fits', img.psf_vary_maj*fwsig, img, dir) + func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_min.fits', img.psf_vary_min*fwsig, img, dir) + func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_pa.fits', img.psf_vary_pa, img, dir) + func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_ratio.fits', img.psf_vary_ratio, img, dir) + func.write_image_to_file(img.use_io, img.imagename + '.psf_vary_ratio_aper.fits', img.psf_vary_ratio_aper, img, dir) # Loop through source and Gaussian lists and deconvolve the sizes using appropriate beam bar2 = statusbar.StatusBar('Correcting deconvolved source sizes ..... : ', 0, img.nsrc) @@ -300,8 +346,8 @@ class Op_psf_vary(Op): def bindata(self, over,num): #ptpbin,nbin,ptplastbin, same as get_bins in fBDSM. - if num <100: ptpbin=num/5 - if num >100: ptpbin=num/10 + if num <= 100: ptpbin=num/5 + if num > 100: ptpbin=num/10 if num > 1000: ptpbin=num/20 if ptpbin % 2 == 1: ptpbin=ptpbin+1 if num < 10: ptpbin=num @@ -488,43 +534,30 @@ class Op_psf_vary(Op): ################################################################################################## def get_voronoi_generators(self, g_gauls, generators, gencode, snrcut, snrtop, snrbot, snrcutstack): """This gets the list of all voronoi generators. It is either the centres of the brightest - sources, or is imported from metadata (in future). generators=calib implies only one source - per facet, and sources between snrtop and snrmax are primary generators. generators=field - implies all sources between snrbot and snrtop are secondary generators. This is the same as - get_voronoi_generators.f in fBDSM. 
If calibrators='field' then vorogenS is a list of gen.s else - is None.""" + sources, or is imported from metadata (in future).""" from math import sqrt num=len(g_gauls[0]) snr=N.asarray(g_gauls[1])/N.asarray(g_gauls[8]) index=snr.argsort() - snr = snr[index] -# snr = snr[::-1] + snr_incr = snr[index] + snr = snr_incr[::-1] x = N.asarray(g_gauls[2])[index] y = N.asarray(g_gauls[3])[index] - cutoff = 0; npts = 0 + cutoff = 0 if generators == 'calibrators' or generators == 'field': - if gencode != 'file': gencode = 'list' + if gencode != 'file': + gencode = 'list' if gencode == 'list': cutoff = int(round(num*(snrtop))) - if cutoff == len(snr): - cutoff -= 1 + if cutoff > len(snr): + cutoff = len(snr) # Make sure we don't fall below snrcutstack (SNR cut for stacking of PSFs), since # it makes no sense to make tiles with generators that fall below this cut. - if snr[cutoff] < snrcutstack: cutoff = snr.searchsorted(snrcutstack) - if cutoff < 2: - cutoff = 2 - npts = num - cutoff + 1 - -# if generators == 'field': -# cutoff = int(round(num*(1.0-snrtop))) -# if cutoff < 2: -# cutoff = 2 -# npts = num - cutoff + 1 -# cutoffs = int(round(num*(1.0-snrbot))) -# nptss = cutoff - cutoffs + if snr[cutoff-1] < snrcutstack: + cutoff = num - snr_incr.searchsorted(snrcutstack) if generators == 'calibrators': if gencode == 'file': @@ -534,17 +567,10 @@ class Op_psf_vary(Op): y1 = y.tolist() x1.reverse() y1.reverse() - x=x1 - y=y1 snr1 = snr.tolist() - snr1.reverse() - snr = snr1 - vorogenP = N.asarray([x[0:cutoff-2], y[0:cutoff-2], snr[0:cutoff-2]]) + vorogenP = N.asarray([x1[0:cutoff], y1[0:cutoff], snr1[0:cutoff]]) - # for generator=field vorogenS = None - if generators == 'field': - vorogenS = N.asarray([x[cutoff-2:cutoffs-2:-1], y[cutoff-2:cutoffs-2:-1], snr[cutoff-2:cutoffs-2:-1]]) return vorogenP, vorogenS @@ -554,9 +580,8 @@ class Op_psf_vary(Op): have more than one generator to be averaged. tile_list is a list of arrays, indexed by the tile number and each array is an array of numbers in the ngen list which are the generators in that tile. xtile, ytile and snrtile are arrays of length number_of_tiles - and have x,y,snr of each tile. The list of tiles is modified later - using the secondary list in tesselate. For now though, just group together gen.s - if closer than a fraction of dist to third closest. Same as edit_vorogenlist in fBDSM. """ + and have x,y,snr of each tile. Group together generators + if closer than a fraction of dist to third closest.""" xgen, ygen, snrgen = vorogenP flag = N.zeros(len(xgen)) @@ -784,6 +809,7 @@ class Op_psf_vary(Op): and pass it to stackpsf with a weight for each gaussian, to calculate the average psf per tile. 
Should define weights inside a tile to include closure errors """ + mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Psf_Vary") tile_list, tile_coord, tile_snr = tile_prop tr_gaul = self.trans_gaul(g_gauls) @@ -795,10 +821,10 @@ class Op_psf_vary(Op): psfratio_aper = [] # ratio of peak flux to aperture flux srcpertile = N.zeros(ntile) snrpertile = N.zeros(ntile) + xt, yt = N.transpose(tile_coord) if plot: pl.figure(None) - xt, yt = N.transpose(tile_coord) colours=['b','g','r','c','m','y','k']*(len(xt)/7+1) pl.axis([0.0, image.shape[0], 0.0, image.shape[1]]) pl.title('Tesselated image with tile centres and unresolved sources') @@ -810,7 +836,6 @@ class Op_psf_vary(Op): tile_gauls = [n for n in tr if volrank[int(round(n[2])),int(round(n[3]))]-1 \ == itile] t_gauls = self.trans_gaul(tile_gauls) - srcpertile[itile] = len(tile_gauls) if plot: pl.plot(t_gauls[2], t_gauls[3], 'x'+'k', mew=1.3)#colours[itile]) @@ -820,6 +845,8 @@ class Op_psf_vary(Op): pl.plot(xx,yy,'-'+colours[itile]) wts = N.asarray(t_gauls[1])/N.asarray(t_gauls[8]) # wt is SNR snrpertile[itile] = sum(wts) + mylog.info('PSF tile #%i (center = %i, %i): %i unresolved sources, SNR = %.1f' % + (itile, xt[itile], yt[itile], srcpertile[itile], snrpertile[itile])) a = self.stackpsf(image, beam, t_gauls, wts, cdelt, factor) psfimages.append(a) psfcoords.append([sum(N.asarray(t_gauls[2])*wts)/sum(wts), sum(N.asarray(t_gauls[3])*wts)/sum(wts)]) @@ -924,19 +951,6 @@ class Op_psf_vary(Op): yo=N.arange(0.0,round(imshape[1]), round(compress)) rgrid=nat.Natgrid(xi,yi,xo,yo) prop_int = rgrid.rgrd(prop) -# if img.masked: -# unmasked = N.where(~mask) -# stdprop = N.std(prop_int[unmasked]) -# minprop = N.min(prop_int[unmasked]) -# maxprop = N.max(prop_int[unmasked]) -# else: -# stdprop = N.std(prop_int) -# minprop = N.min(prop_int) -# maxprop = N.max(prop_int) -# if (maxprop - minprop) > 3.0*stdprop: -# return prop_int -# else: -# return N.mean(prop_int) return prop_int ################################################################################################## @@ -995,3 +1009,18 @@ class Op_psf_vary(Op): # return blah +################################################################################################## + def blur_image(self, im, n, ny=None) : + """ blurs the image by convolving with a gaussian kernel of typical + size n. The optional keyword argument ny allows for a different + size in the y direction. + """ + from scipy.ndimage import gaussian_filter + + sx = n + if ny != None: + sy = ny + else: + sy = n + improc = gaussian_filter(im, [sy, sx]) + return improc diff --git a/CEP/PyBDSM/src/python/readimage.py b/CEP/PyBDSM/src/python/readimage.py index 5fa6119d635d2fdb6ee299ec17075c4730cd8f16..e53b2e3ff939dd61bdd8370ca56f5278be1accda 100644 --- a/CEP/PyBDSM/src/python/readimage.py +++ b/CEP/PyBDSM/src/python/readimage.py @@ -103,6 +103,10 @@ class Op_readimage(Op): root, ext = os.path.splitext(img.opts.filename) if ext in ['.fits', '.FITS', '.image']: fname = root + elif ext in ['.gz', '.GZ']: + root2, ext2 = os.path.splitext(root) + if ext2 in ['.fits', '.FITS', '.image']: + fname = root2 else: fname = img.opts.filename img.filename = img.opts.filename @@ -140,9 +144,9 @@ class Op_readimage(Op): Thanks to transpose operation done to image earlier we can use p2s & s2p transforms directly. - + Both WCSLIB (from LOFAR svn) and PyWCS (http://stsdas.stsci.edu/ - astrolib/pywcs/, available from https://trac6.assembla.com/astrolib) + astrolib/pywcs/, available from https://trac6.assembla.com/astrolib) are supported. 
""" try: @@ -220,7 +224,7 @@ class Op_readimage(Op): img.sky2pix = t.s2p elif img.use_wcs == 'pywcs': # Here we define new p2s and s2p methods to match those of wcslib. - # Note that, due to a bug in pywcs version 1.10-4.7, the + # Note that, due to a bug in pywcs version 1.10-4.7, the # "ra_dec_order" option cannot be used. When the bug is fixed, # this option should probably be re-enabled. def p2s(self, xy): @@ -269,7 +273,7 @@ class Op_readimage(Op): ### define beam conversion routines: def beam2pix(x, location=None): """ Converts beam in deg to pixels. - + location specifies the location in pixels (x, y) for which beam is desired Input beam angle should be degrees CCW from North. The output beam angle is degrees CCW from the +y axis of the image. @@ -290,7 +294,7 @@ class Op_readimage(Op): def pix2beam(x, location=None): """ Converts beam in pixels to deg. - + location specifies the location in pixels (x, y) for which beam is desired Input beam angle should be degrees CCW from the +y axis of the image. The output beam angle is degrees CCW from North. @@ -376,7 +380,7 @@ class Op_readimage(Op): mylog = mylogger.logging.getLogger("PyBDSM.InitFreq") if img.opts.frequency_sp != None and img.image.shape[1] > 1: # If user specifies multiple frequencies, then let - # collapse.py do the initialization + # collapse.py do the initialization img.cfreq = img.opts.frequency_sp[0] img.freq_pars = (0.0, 0.0, 0.0) mylog.info('Using user-specified frequencies.') @@ -469,7 +473,7 @@ class Op_readimage(Op): def get_rot(self, img, location=None): """Returns CCW rotation angle (in degrees) between N and +y axis of image - + location specifies the location in pixels (x, y) for which beam is desired """ if location == None: diff --git a/CEP/PyBDSM/src/python/rmsimage.py b/CEP/PyBDSM/src/python/rmsimage.py index 3ed75ea76706db9cb8c85ea92c4d31a3963abc3e..9fb061f49081580f76532e6f20625547b1527927 100644 --- a/CEP/PyBDSM/src/python/rmsimage.py +++ b/CEP/PyBDSM/src/python/rmsimage.py @@ -214,7 +214,7 @@ class Op_rmsimage(Op): if ((opts.rms_map is not False) or (opts.mean_map not in ['zero', 'const'])) and img.rms_box2[0] > min(img.ch0.shape)/4.0: # rms box is too large - just use constant rms and mean self.output_rmsbox_size(img) - mylog.warning('Size of rms_box larger than 1/4 of image size') + mylogger.userinfo(mylog, 'Size of rms_box larger than 1/4 of image size') mylogger.userinfo(mylog, 'Using constant background rms and mean') img.use_rms_map = False img.mean_map_type = 'const' @@ -236,8 +236,7 @@ class Op_rmsimage(Op): new_step = int(new_width/3.0) img.rms_box = (new_width, new_step) if img.rms_box[0] > min(img.ch0.shape)/4.0: - #self.output_rmsbox_size(img) - mylog.warning('Size of rms_box larger than 1/4 of image size') + mylogger.userinfo(mylog, 'Size of rms_box larger than 1/4 of image size') mylogger.userinfo(mylog, 'Using constant background rms and mean') img.use_rms_map = False img.mean_map_type = 'const' @@ -274,8 +273,7 @@ class Op_rmsimage(Op): new_step = int(new_width/3.0) img.rms_box = (new_width, new_step) if img.rms_box[0] > min(img.ch0.shape)/4.0: - #self.output_rmsbox_size(img) - mylog.warning('Size of rms_box larger than 1/4 of image size') + mylogger.userinfo(mylog, 'Size of rms_box larger than 1/4 of image size') mylogger.userinfo(mylog, 'Using constant background rms and mean') img.use_rms_map = False img.mean_map_type = 'const' @@ -340,8 +338,10 @@ class Op_rmsimage(Op): '(%.5f, %.5f) Jy/beam' % (rms_min, rms_max)) if img.mean_map_type != 'map': - val = 0.0 - if opts.mean_map == 
'const': val = img.clipped_mean + if opts.mean_map == 'zero': + val = 0.0 + else: + val = img.clipped_mean mean[:] = val mylogger.userinfo(mylog, 'Value of background mean' + pol_txt, str(round(val,5))+' Jy/beam') diff --git a/CEP/PyBDSM/src/python/spectralindex.py b/CEP/PyBDSM/src/python/spectralindex.py index 6e7b80b2dbac27812b1adf8d14e0b24b21d41a43..9bf81a841d4c57beba382543cea7db2d9883af31 100644 --- a/CEP/PyBDSM/src/python/spectralindex.py +++ b/CEP/PyBDSM/src/python/spectralindex.py @@ -1,6 +1,6 @@ """Module Spectral index. - - This module calculates spectral indices for Gaussians and sources for a multichannel cube. + + This module calculates spectral indices for Gaussians and sources for a multichannel cube. """ @@ -10,7 +10,7 @@ import mylogger from gaul2srl import Source from copy import deepcopy as cp import _cbdsm -import collapse +import collapse import sys import functions as func import time @@ -24,34 +24,34 @@ Gaussian.specin_flux = List(Float(), doc = "Total flux density per channel, Jy", Gaussian.specin_fluxE = List(Float(), doc = "Error in total flux density per channel, Jy", colname=['E_Total_flux'], units=['Jy']) Gaussian.specin_freq = List(Float(), doc = "Frequency per channel, Hz", colname=['Freq'], units=['Hz']) Source.spec_indx = Float(doc = "Spectral index", colname='Spec_Indx', units=None) -Source.e_spex_indx = Float(doc = "Error in spectral index", colname='E_Spec_Indx', units=None) +Source.e_spec_indx = Float(doc = "Error in spectral index", colname='E_Spec_Indx', units=None) Source.specin_flux = List(Float(), doc = "Total flux density, Jy", colname=['Total_flux'], units=['Jy']) Source.specin_fluxE = List(Float(), doc = "Error in total flux density per channel, Jy", colname=['E_Total_flux'], units=['Jy']) Source.specin_freq = List(Float(), doc = "Frequency per channel, Hz", colname=['Freq'], units=['Hz']) class Op_spectralindex(Op): """Computes spectral index of every gaussian and every source. - + First do a quick fit to all channels to determine whether averaging over frequency is needed to obtain desired SNR (set by img.opts.specind_snr). - This averaging should be done separately for both Gaussians and + This averaging should be done separately for both Gaussians and sources. For S and C sources, averaging only needs to be done once (as the sources have only one Gaussian). - + For M sources, averaging is needed twice: once to obtain the desired SNR for the faintest Gaussian in the source, and once to obtain the desired SNR for the source as a whole. - + If averaging is needed for a given source, don't let the number of resulting channels fall below 2. If it is not possible to obtain the desired SNR in 2 or more channels, set spec_indx of Gaussian/source to NaN. - + """ def __call__(self, img): global bar1 - + mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"SpectIndex") img.mylog = mylog if img.opts.spectralindex_do: @@ -62,9 +62,9 @@ class Op_spectralindex(Op): self.freq_beamsp_unav(img) sbeam = img.beam_spectrum freqin = img.freq - + # calc initial channel flags if needed - iniflags = self.iniflag(img) + iniflags = self.iniflag(img) img.specind_iniflags = iniflags good_chans = N.where(iniflags == False) unav_image = img.image[0][good_chans] @@ -74,24 +74,24 @@ class Op_spectralindex(Op): mylog.info('After initial flagging of channels by rms, %i good channels remain' % (nchan,)) if nmax_to_avg == 0: nmax_to_avg = nchan - + # calculate the rms map of each unflagged channel bar1 = statusbar.StatusBar('Determing rms for channels in image ..... 
: ', 0, nchan) if img.opts.quiet == False: bar1.start() rms_spec = self.rms_spectrum(img, unav_image) # bar1 updated here - + bar2 = statusbar.StatusBar('Calculating spectral indices for sources : ', 0, img.nsrc) c_wts = img.opts.collapse_wt snr_desired = img.opts.specind_snr - + if img.opts.quiet == False and img.opts.verbose_fitting == False: bar2.start() for src in img.sources: isl = img.islands[src.island_id] isl_bbox = isl.bbox - - # Fit each channel with ch0 Gaussian(s) of the source, + + # Fit each channel with ch0 Gaussian(s) of the source, # allowing only the normalization to vary. chan_images = unav_image[:, isl_bbox[0], isl_bbox[1]] chan_rms = rms_spec[:, isl_bbox[0], isl_bbox[1]] @@ -102,7 +102,7 @@ class Op_spectralindex(Op): # and is True if measured flux is upper limit. n_good_chan_per_gaus is array of N_gaussians # that gives number of unmasked channels for each Gaussian. gaus_mask, n_good_chan_per_gaus = self.mask_upper_limits(unavg_total_flux, e_unavg_total_flux, snr_desired) - + # Average if needed and fit again # First find flux of faintest Gaussian of source and use it to estimate rms_desired gflux = [] @@ -110,33 +110,33 @@ class Op_spectralindex(Op): gflux.append(g.peak_flux) rms_desired = min(gflux)/snr_desired total_flux = unavg_total_flux - e_total_flux = e_unavg_total_flux - freq_av = unav_freqs + e_total_flux = e_unavg_total_flux + freq_av = unav_freqs nchan = chan_images.shape[0] nchan_prev = nchan while min(n_good_chan_per_gaus) < 2 and nchan > 2: - avimages, beamlist, freq_av, crms_av = self.windowaverage_cube(chan_images, rms_desired, chan_rms, + avimages, beamlist, freq_av, crms_av = self.windowaverage_cube(chan_images, rms_desired, chan_rms, c_wts, sbeam, freqin, nmax_to_avg=nmax_to_avg) - total_flux, e_total_flux = self.fit_channels(img, avimages, crms_av, src, beamlist) + total_flux, e_total_flux = self.fit_channels(img, avimages, crms_av, src, beamlist) gaus_mask, n_good_chan_per_gaus = self.mask_upper_limits(total_flux, e_total_flux, snr_desired) nchan = avimages.shape[0] if nchan == nchan_prev: break nchan_prev = nchan rms_desired *= 0.8 - + # Now fit Gaussian fluxes to obtain spectral indices. - # Only fit if there are detections (at specified sigma threshold) - # in at least two bands. If not, don't fit and set spec_indx + # Only fit if there are detections (at specified sigma threshold) + # in at least two bands. If not, don't fit and set spec_indx # and error to NaN. for ig, gaussian in enumerate(src.gaussians): npos = len(N.where(total_flux[:, ig] > 0.0)[0]) if img.opts.verbose_fitting: if img.opts.flagchan_snr: - print 'Gaussian #%i : averaged to %i channels, of which %i meet SNR criterion' % (gaussian.gaus_num, + print 'Gaussian #%i : averaged to %i channels, of which %i meet SNR criterion' % (gaussian.gaus_num, len(total_flux[:, ig]), n_good_chan_per_gaus[ig]) else: - print 'Gaussian #%i : averaged to %i channels, all of which will be used' % (gaussian.gaus_num, + print 'Gaussian #%i : averaged to %i channels, all of which will be used' % (gaussian.gaus_num, len(total_flux[:, ig])) if (img.opts.flagchan_snr and n_good_chan_per_gaus[ig] < 2) or npos < 2: gaussian.spec_indx = N.NaN @@ -160,7 +160,7 @@ class Op_spectralindex(Op): gaussian.specin_fluxE = e_fluxes_to_fit.tolist() gaussian.specin_freq = freqs_to_fit.tolist() gaussian.specin_freq0 = N.median(freqs_to_fit) - + # Next fit total source fluxes for spectral index. if len(src.gaussians) > 1: # First, check unaveraged SNRs for total source. 
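In the channel-averaging loop above, neighbouring channels are combined until the rms of the averaged channel, tracked as 1/sqrt(sum(1/rms_i**2)), drops below ``rms_desired``; if fewer than two channels then meet the SNR criterion, the rms target is tightened (multiplied by 0.8) and the averaging is repeated. A small stand-alone illustration of the rms combination with toy numbers:

    import numpy as np

    chanrms = np.array([0.2, 0.2, 0.2, 0.2, 0.2])   # per-channel rms, Jy/beam (toy values)
    rms_desired = 0.12

    def combined_rms(rms):
        # rms of an inverse-variance-weighted average of the given channels
        return 1.0 / np.sqrt(np.sum(1.0 / rms**2))

    print combined_rms(chanrms[:2])   # ~0.141, still above rms_desired
    print combined_rms(chanrms[:3])   # ~0.115, below rms_desired: three channels suffice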
@@ -169,18 +169,18 @@ class Op_spectralindex(Op): src_total_flux[:,0] = N.sum(unavg_total_flux, 1) # sum over all Gaussians in source to obtain total fluxes in each channel src_e_total_flux[:,0] = N.sqrt(N.sum(N.power(e_unavg_total_flux, 2.0), 1)) src_mask, n_good_chan = self.mask_upper_limits(src_total_flux, src_e_total_flux, snr_desired) - + # Average if needed and fit again rms_desired = src.peak_flux_max/snr_desired total_flux = unavg_total_flux e_total_flux = e_unavg_total_flux - freq_av = unav_freqs + freq_av = unav_freqs nchan = chan_images.shape[0] nchan_prev = nchan while n_good_chan < 2 and nchan > 2: - avimages, beamlist, freq_av, crms_av = self.windowaverage_cube(chan_images, rms_desired, chan_rms, + avimages, beamlist, freq_av, crms_av = self.windowaverage_cube(chan_images, rms_desired, chan_rms, c_wts, sbeam, freqin, nmax_to_avg=nmax_to_avg) - total_flux, e_total_flux = self.fit_channels(img, avimages, crms_av, src, beamlist) + total_flux, e_total_flux = self.fit_channels(img, avimages, crms_av, src, beamlist) src_total_flux = N.sum(total_flux, 1) # sum over all Gaussians in source to obtain total fluxes in each channel src_e_total_flux = N.sqrt(N.sum(N.power(e_total_flux, 2.0), 1)) src_mask, n_good_chan = self.mask_upper_limits(src_total_flux, src_e_total_flux, snr_desired) @@ -189,18 +189,18 @@ class Op_spectralindex(Op): break nchan_prev = nchan rms_desired *= 0.8 - + # Now fit source for spectral index. src_total_flux = src_total_flux.reshape((src_total_flux.shape[0],)) src_e_total_flux = src_e_total_flux.reshape((src_e_total_flux.shape[0],)) src_mask = src_mask.reshape((src_mask.shape[0],)) if img.opts.verbose_fitting: if img.opts.flagchan_snr: - print 'Source #%i : averaged to %i channels, of which %i meet SNR criterion' % (src.source_id, + print 'Source #%i : averaged to %i channels, of which %i meet SNR criterion' % (src.source_id, len(src_total_flux), nchan) else: - print 'Source #%i : averaged to %i channels, all of which will be used' % (src.source_id, - len(src_total_flux)) + print 'Source #%i : averaged to %i channels, all of which will be used' % (src.source_id, + len(src_total_flux)) npos = len(N.where(src_total_flux > 0.0)) if (img.opts.flagchan_snr and n_good_chan < 2) or npos < 2: src.spec_indx = N.NaN @@ -249,11 +249,11 @@ class Op_spectralindex(Op): #################################################################################### def flagchans_rmschan(self, crms, zeroflags, iniflags, cutoff): - """ Calculate clipped rms (r1) of the rms as fn of channel, crms, with zeroflags + """ Calculate clipped rms (r1) of the rms as fn of channel, crms, with zeroflags applied and kappa=cutoff. Then exclude crms=0 (for NaN mages etc) and get ch.s which are more than cutoff*r1 away from median of rms. If this is less than 10 % - of all channels, flag them. - + of all channels, flag them. + """ # crms_rms and median dont include rms=0 channels @@ -263,8 +263,8 @@ class Op_spectralindex(Op): median = N.median(N.delete(crms, zeroind)) badind = N.where(N.abs(N.delete(crms, zeroind) - median)/crms_rms >=cutoff)[0] frac = len(badind)/(nchan - len(zeroind)) - - if frac <= 0.1: + + if frac <= 0.1: badind = N.where(N.abs(crms - median)/crms_rms >=cutoff)[0] iniflags[badind] = True @@ -274,7 +274,7 @@ class Op_spectralindex(Op): def iniflag(self, img): """ Calculate clipped rms of every channel, and then median and clipped rms of this rms distribution. 
Exclude channels where rms=0 (all pixels 0 or blanked) and of the remaining, if outliers beyond 5 sigma - are less then 10 % of number of channels, flag them. This is done only when flagchan_rms = True. + are less then 10 % of number of channels, flag them. This is done only when flagchan_rms = True. If False, only rms=0 (meaning, entire channel image is zero or blanked) is flagged.""" image = img.image @@ -288,17 +288,17 @@ class Op_spectralindex(Op): iniflags = cp(zeroflags) if img.opts.flagchan_rms: - iniflags = self.flagchans_rmschan(crms, zeroflags, iniflags, 4.0) + iniflags = self.flagchans_rmschan(crms, zeroflags, iniflags, 4.0) return iniflags - + #################################################################################### def freq_beamsp_unav(self, img): """ Defines img.beam_spectrum and img.freq for the unaveraged cube. """ shp = img.image.shape - sbeam = img.opts.beam_spectrum + sbeam = img.opts.beam_spectrum if sbeam != None and len(sbeam) != shp[1]: sbeam = None # sanity check if sbeam == None: sbeam = [img.beam]*shp[1] @@ -353,7 +353,7 @@ class Op_spectralindex(Op): median_rms = rms_spec str1 = " ".join(["%9.4e" % n for n in img.channel_clippedrms]) - if rms_map: + if rms_map: mylog.debug('%s %s ' % ('Median rms of channels : ', str1)) mylog.info('RMS image made for each channel') else: @@ -366,7 +366,7 @@ class Op_spectralindex(Op): #################################################################################### def fit_specindex(self, freqarr, fluxarr, efluxarr, do_log=False): """ Fits spectral index to data. - + do_log is True/False implies you fit spectral index in logFlux vs logFreq space or not.""" import functions as func import math @@ -384,7 +384,7 @@ class Op_spectralindex(Op): else: x = x/f0; y = flux; sig = eflux funct = func.sp_in - + spin, espin = func.fit_mask_1d(x, y, sig, mask, funct, do_err=True, order=1) if do_log: @@ -395,31 +395,31 @@ class Op_spectralindex(Op): ######################################################################################## - - def windowaverage_cube(self, imagein, rms_desired, chanrms, c_wts, sbeam, + + def windowaverage_cube(self, imagein, rms_desired, chanrms, c_wts, sbeam, freqin, n_min=2, nmax_to_avg=10): """Average neighboring channels of cube to obtain desired rms in at least n_min channels - + The clipped rms of each channel is compared to the desired rms. If the clipped rms is too high, the channel is averaged with as many neighboring channels as necessary to obtain at least the desired rms. This is done until the number of OK channels is 2. The averaging is done first at - the frequency extremes, as frequency range the resulting averaged flux array + the frequency extremes, as frequency range the resulting averaged flux array will be maximized. - - For example, if the desired rms is 0.1 and the list of rms's is: - + + For example, if the desired rms is 0.1 and the list of rms's is: + [0.2, 0.2, 0.3, 0.2, 0.2] - + the resulting channels that will be averaged are: - + [[0, 1], [2], [3, 4]] """ from math import sqrt from collapse import avspc_direct, avspc_blanks - + nchan = imagein.shape[0] - + # chan_list is a list of lists of channels to average. 
E.g., if we have # 5 channels and we want to average only the first 2: # chan_list = [[0,1], [2], [3], [4]] @@ -428,7 +428,7 @@ class Op_spectralindex(Op): else: crms = chanrms chan_list = self.get_avg_chan_list(rms_desired, crms, nmax_to_avg) - + n_new = len(chan_list) beamlist = [] crms_av = N.zeros(n_new) @@ -438,7 +438,7 @@ class Op_spectralindex(Op): hasblanks = blank.any() for ichan, avg_list in enumerate(chan_list): if len(avg_list) > 1: - if not hasblanks: + if not hasblanks: imageout[ichan], dum = avspc_direct(avg_list, imagein, crms, c_wts) else: imageout[ichan], dum = avspc_blanks(avg_list, imagein, crms, c_wts) @@ -451,9 +451,9 @@ class Op_spectralindex(Op): beamlist.append(sbeam[avg_list[0]]) freq_av[ichan] = N.mean(freqin[avg_list[0]]) crms_av[ichan] = 1.0/sqrt(N.sum(1.0/crms[avg_list[0]]**2)) - - return imageout, beamlist, freq_av, crms_av - + + return imageout, beamlist, freq_av, crms_av + def get_avg_chan_list(self, rms_desired, chanrms, nmax_to_avg): """Returns a list of channels to average to obtain given rms_desired @@ -468,7 +468,7 @@ class Op_spectralindex(Op): rms_avg = chanrms[0] while rms_avg > rms_desired: end += 1 - chan_slice = slice(0, end) + chan_slice = slice(0, end) rms_avg = 1.0/N.sqrt(N.sum(1.0/N.array(chanrms)[chan_slice]**2)) if end == nchan or end == nmax_to_avg: break @@ -481,18 +481,18 @@ class Op_spectralindex(Op): # and return. chan_list = [range(0, int(float(nchan)/2.0)), range(int(float(nchan)/2.0), nchan)] return chan_list - + # Average channels at end of list rms_avg = chanrms[-1] end = nchan start = nchan while rms_avg > rms_desired: start -= 1 - chan_slice = slice(start, end) + chan_slice = slice(start, end) rms_avg = 1.0/N.sqrt(N.sum(1.0/chanrms[chan_slice]/chanrms[chan_slice])) if end-start == nmax_to_avg: break - + if start <= max(chan_list[0]): # This means we cannot get two averaged channels with desired rms, # so just average remaining channels @@ -509,27 +509,27 @@ class Op_spectralindex(Op): for i in range(nchan): chan_list.append([i]) return chan_list - + def fit_channels(self, img, chan_images, clip_rms, src, beamlist): """Fits normalizations of Gaussians in source to multiple channels - - If unresolved, the size of the Gaussians are adjusted to match the + + If unresolved, the size of the Gaussians are adjusted to match the channel's beam size (given by beamlist) before fitting. - - Returns array of total fluxes (N_channels x N_Gaussians) and array + + Returns array of total fluxes (N_channels x N_Gaussians) and array of errors (N_channels x N_Gaussians). 
""" import functions as func from const import fwsig - + isl = img.islands[src.island_id] isl_bbox = isl.bbox nchan = chan_images.shape[0] x, y = N.mgrid[isl_bbox] gg = src.gaussians fitfix = N.ones(len(gg)) # fit only normalization - srcmask = isl.mask_active + srcmask = isl.mask_active total_flux = N.zeros((nchan, len(fitfix))) # array of fluxes: N_channels x N_Gaussians errors = N.zeros((nchan, len(fitfix))) # array of fluxes: N_channels x N_Gaussians @@ -545,7 +545,7 @@ class Op_spectralindex(Op): rms_isl = N.mean(clip_rms[cind]) errors[cind] = func.get_errors(img, p, rms_isl, bm_pix=(bm_pix[0]*fwsig, bm_pix[1]*fwsig, bm_pix[2]))[6] self.reset_size(gg) - + return total_flux, errors def adjust_size_by_freq(self, beam_ch0, beam, gg): @@ -559,12 +559,12 @@ class Op_spectralindex(Op): g.size_pix_adj[1] *= beam[1] / beam_ch0[1] gg_adj.append(g) return gg_adj - + def reset_size(self, gg): """Reset size of unresolved Gaussians to match the ch0 beam size""" for g in gg: if hasattr(g, 'size_pix_adj'): del g.size_pix_adj - + def mask_upper_limits(self, total_flux, e_total_flux, threshold): """Returns mask of upper limits""" mask = N.zeros(total_flux.shape, dtype=bool) @@ -587,15 +587,15 @@ class Op_spectralindex(Op): if meas_flux < threshold * e_meas_flux: # Upper limit if is_src: - mask[ichan] = True + mask[ichan] = True else: mask[ichan, ig] = True else: # Detection if is_src: ndet += 1 - mask[ichan] = False + mask[ichan] = False else: ndet[ig] += 1 mask[ichan, ig] = False - return mask, ndet \ No newline at end of file + return mask, ndet diff --git a/LCU/Firmware/tools/src/flash_images.sh b/LCU/Firmware/tools/src/flash_images.sh index 9c02160d587e6be7fc50364945aede3177ffdfc3..a479693557f196b47dd515e5237069db47028990 100755 --- a/LCU/Firmware/tools/src/flash_images.sh +++ b/LCU/Firmware/tools/src/flash_images.sh @@ -11,7 +11,7 @@ SyntaxError() exit 1 } -if [ ${#argv} == 0 ]; then +if [ $# == 0 ]; then SyntaxError fi diff --git a/LCU/StationTest/stationtest.py b/LCU/StationTest/stationtest.py index 1eda68a2a730910aa9a58b2836401d91bca5b836..253c81873bbbaae3963e4bd8f7d2404a03ab05de 100755 --- a/LCU/StationTest/stationtest.py +++ b/LCU/StationTest/stationtest.py @@ -3,7 +3,7 @@ # # Run the tests to test a LOFAR station # H. Meulman -# Version 0.14 17-feb-2012 SVN***** +# Version 0.18 2-okt-2012 SVN***** # 24 sep: local log directory aangepast # 27 sept: - Toevoeging delay voor tbbdriver polling @@ -31,7 +31,9 @@ # 27 jan 2012: Store logfiles in /localhome/stationtest/data in "local mode" # 17 feb 2012: Added detection of oscillating tiles. # 9 mar 2012: Devide by 0 error solved in HBAtest -# 13 sept 2012: Added for user0..9 sys.path.append("/opt/stationtest/modules") +# 13 Apr 2012: added LBAdatatest directory. Also directorys need to change permissions to work with USER0. 
+# 20 Apr 2012: Logging suspicious tiles and elements in HBA modem test +# 13 Sep 2012: Added for user0..9 sys.path.append("/opt/stationtest/modules") # todo: # - Als meer dan 10 elementen geen rf signaal hebben, keur dan hele tile af @@ -69,7 +71,7 @@ factor = 30 # station statistics fault window: Antenna average + and - factor = InternationalStations = ('DE601C','DE602C','DE603C','DE604C','DE605C','FR606C','SE607C','UK608C') RemoteStations = ('CS302C','RS106C','RS205C','RS208C','RS306C','RS307C','RS406C','RS503C') CoreStations = ('CS001C','CS002C','CS003C','CS004C','CS005C','CS006C','CS007C','CS011C','CS013C','CS017C','CS021C','CS024C','CS026C','CS028C','CS030C','CS031','CS032C','CS101C','CS103C','CS201C','CS301C','CS401C','CS501C') -NoHBAelementtestPossible = ('DE601C','DE602C','DE603C','DE605C','FR606C','SE607C','UK608C') +NoHBAelementtestPossible = ('DE601C','DE602C','DE603C','DE605C','FR606C','SE607C','UK608C') # NoHBANaStestPossible = ('') HBASubband = dict( DE601C=155,\ DE602C=155,\ @@ -77,7 +79,7 @@ HBASubband = dict( DE601C=155,\ DE604C=474,\ DE605C=479,\ FR606C=155,\ - SE607C=155,\ + SE607C=287,\ UK608C=155) # Do not change: @@ -340,7 +342,7 @@ def GotoSwlevel2(): time.sleep(120) res = os.popen3('rspctl --datastream=0')[1].readlines() print res - #time.sleep(90) # Tijdelijk toe gevoegd voor nieuwe tbbdriver. Deze loopt vast tijdens pollen +# time.sleep(90) # Tijdelijk toe gevoegd voor nieuwe tbbdriver. Deze loopt vast tijdens pollen # CheckTBB() # Tijdelijk weg gelaten voor nieuwe tbbdriver. Deze loopt vast tijdens pollen #fromprg.close() break @@ -714,10 +716,13 @@ def CheckRSPVersion(): RSPgold = open(RSPgoldfile,'r').readlines() # Read RSP Version gold RSPversion = os.popen3('rspctl --version')[1].readlines() # Get RSP Versions # res = cli.command('./rsp_version.sh') +# debug=1 if debug: + print ('RSPgold = ', RSPgold) for RSPnumber in range(len(RSPgold)): if RSPgold[RSPnumber] == RSPversion[RSPnumber]: print ('RSP OK = ', RSPnumber) else: print ('RSPNOK = ', RSPnumber) +# debug=0 # store subreck testlog for RSPnumber in range(len(RSPgold)): if RSPgold[RSPnumber] != RSPversion[RSPnumber]: @@ -1240,7 +1245,7 @@ def LBAtest(): SeverityOfThisTest=2 PriorityOfThisTest=2 - debug=0 +# debug=1 global Severity global Priority @@ -1249,7 +1254,8 @@ def LBAtest(): sr.setId('LBAmd1>: ') sub_time=[] sub_file=[] - dir_name = './lbadatatest/' #Work directory will be cleaned +# dir_name = './lbadatatest/' #Work directory will be cleaned + dir_name = '/opt/stationtest/test/hbatest/lbadatatest/' #Work directory will be cleaned if not (os.path.exists(dir_name)): os.mkdir(dir_name) rmfile = '*.log' @@ -1561,7 +1567,7 @@ def HBAModemTest(): global Priority global ModemFail - debug=0 +# debug=1 sr.setId('HBAmdt>: ') print ('HBA ModemTest') @@ -1605,6 +1611,61 @@ def HBAModemTest(): count+=1 ModemFail[TileNr]=1 # global variabele om in HBA element test de RF meting over te slaan. +# + if (count > 10 and isodd(RCUNr)): #Als er meer dan 10 fouten in zitten, keur dan hele tile af! + print ('Tile %s - RCU %s; Broken. No modem communication' % (TileNr,RCUNr)) + + # store station testlog + #if debug: print ('ModemFail = ',ModemFail) + if Severity<SeverityOfThisTest: Severity=SeverityOfThisTest + if Priority<PriorityOfThisTest: Priority=PriorityOfThisTest + st_log.write('HBAmdt>: Sv=%s Pr=%s, Tile %s - RCU %s; Suspicious.\n' % (SeverityLevel[SeverityOfThisTest], PriorityLevel[PriorityOfThisTest], TileNr, RCUNr)) + sr.setResult('FAILED') + + else: #Anders keur elementen af als fout. 
+ for ElementNumber in range(4, 20): + if (ModemReply[ElementNumber] != ModemReplyGold[ElementNumber] and isodd(RCUNr)): + print ('Tile %s - RCU %s; Element %s; Suspicious. : (%s, %s)' % (TileNr, RCUNr, ElementNumber-3, ModemReply[ElementNumber], ModemReplyGold[ElementNumber])) + # store station testlog + if Severity<SeverityOfThisTest: Severity=SeverityOfThisTest + if Priority<PriorityOfThisTest: Priority=PriorityOfThisTest + st_log.write('HBAmdt>: Sv=%s Pr=%s, Tile %s - RCU %s; Element %s Suspicious. : (%s, %s)\n' % (SeverityLevel[SeverityOfThisTest], PriorityLevel[PriorityOfThisTest], TileNr, RCUNr, ElementNumber-3, ModemReply[ElementNumber], ModemReplyGold[ElementNumber])) + sr.setResult('FAILED') +# print ('ModemFail = ',ModemFail) + + try: + f=open('/opt/stationtest/test/hbatest/hba_modem3.log','rb') + except: + print ('Import error') + if Severity<SeverityOfThisTest: Severity=SeverityOfThisTest + if Priority<PriorityOfThisTest: Priority=PriorityOfThisTest + st_log.write('HBAmdt>: Sv=%s Pr=%s, No modem-logfile found!\n' % (SeverityLevel[SeverityOfThisTest], PriorityLevel[PriorityOfThisTest])) + return + time.sleep(1) + + for line in f: + ModemReply=line + ModemReplyGold=['HBA', '95', 'real', 'delays=', '253', '253', '253', '253', '253', '253', '253', '253', '253', '253', '253', '253', '253', '253', '253', '253'] + if debug: print ('line = ',line[0]) + if line[0] == 'H': # Check of regel geldig is! + ModemReply=line.replace('[',' ').replace('].',' ').split() + RCUNr=int(ModemReply[1]) + TileNr=RCUNr/2 + if debug: + print ('line = ',line) + print ('ModemReply = ',ModemReply) + print ('ModemReplyGold = ',ModemReplyGold) + print ('RCUNr = ',RCUNr) + print ('TileNr = ',TileNr) + +# Check if HBA modems work! + count=0 + for ElementNumber in range(4, 20): +# print ModemReplyGold[ElementNumber] + if ModemReply[ElementNumber] != ModemReplyGold[ElementNumber]: + count+=1 + ModemFail[TileNr]=1 # global variabele om in HBA element test de RF meting over te slaan. + # if (count > 10 and isodd(RCUNr)): #Als er meer dan 10 fouten in zitten, keur dan hele tile af! print ('Tile %s - RCU %s; Broken. No modem communication' % (TileNr,RCUNr)) @@ -1626,7 +1687,6 @@ def HBAModemTest(): st_log.write('HBAmdt>: Sv=%s Pr=%s, Tile %s - RCU %s; Element %s Broken. No modem communication : (%s, %s)\n' % (SeverityLevel[SeverityOfThisTest], PriorityLevel[PriorityOfThisTest], TileNr, RCUNr, ElementNumber-3, ModemReply[ElementNumber], ModemReplyGold[ElementNumber])) sr.setResult('FAILED') # print ('ModemFail = ',ModemFail) - return diff --git a/MAC/APL/APLCommon/src/swlevel b/MAC/APL/APLCommon/src/swlevel index a1c1b5145b3b74738bef4994da7ddbae81e4f395..8168875889a4b4a9ba8d4bc8313d607ccedd4b78 100644 --- a/MAC/APL/APLCommon/src/swlevel +++ b/MAC/APL/APLCommon/src/swlevel @@ -82,7 +82,7 @@ selectImage() # get version number of RSPboard in var. rsu boardHex=`echo $board | awk '{ printf "%02x", $1 }'` rsu=`sudo ${BINDIR}/../sbin/rsuctl3 -m 10:fa:00:00:$boardHex:00 -qV 2>&1 | grep BP | cut -d':' -f2 | sed 's/ //g' | cut -d'.' -f1` - if [ ${rsu} -eq 1 ]; then + if [ $imageForced ] || [ ${rsu} -eq 1 ]; then let version=$rsu # if board has reset itself to image 0, load image 1 again echo "Loading image $image on RSPboard $board ..." 
@@ -442,6 +442,7 @@ handle_args() show_lofar_version $flag ;; i) + imageForced=true image=$OPTARG # This is needed to be able to retrieve the requested swlevel # when it is not provided with option -l @@ -512,6 +513,7 @@ fi user=`id | cut -d'(' -f2 | cut -d')' -f1` group=`groups | awk '{print $1}'` +imageForced=false handle_args $* diff --git a/MAC/Deployment/data/OTDB/DPPP.comp b/MAC/Deployment/data/OTDB/DPPP.comp index 3925183493698ae8ea794f62a3ec341af6656de4..0519a9ecec20dfc53c34eca3edfacd67cec3659d 100644 --- a/MAC/Deployment/data/OTDB/DPPP.comp +++ b/MAC/Deployment/data/OTDB/DPPP.comp @@ -128,6 +128,9 @@ uses Options 4.0.0 development 1 'Solver configuration' node demixer 4.0.0 development 'node constraint' "Demix" par type I text - 10 0 "demixer" - "Type of the demixer, do not change" +par baseline I text - 10 0 "" - "Baselines to be demixed" +par blrange I vdbl - 10 0 "" - "Vector of ranges of baseline lengths (in m) to be demixed" +par corrtype I ptext - 10 0 "auto|cross;cross" - "correlation type to demix? Must be auto, cross, or an empty string." par freqstep I int - 10 0 1 - "Number of channels to average in result." par timestep I int - 10 0 1 - "Number of time slots to average in result." par demixfreqstep I int - 10 0 1 - "Number of channels to average in demixing." diff --git a/MAC/Deployment/data/PVSS/data/Adder.dpdef b/MAC/Deployment/data/PVSS/data/Adder.dpdef index d5bba9c187cef2edb368a64665b31428db3b4b9e..cc7152416bc03dbca5e0142bae69c3ee2ccac11c 100644 --- a/MAC/Deployment/data/PVSS/data/Adder.dpdef +++ b/MAC/Deployment/data/PVSS/data/Adder.dpdef @@ -1,8 +1,10 @@ # Adder -dropping bool +dropping bool dropped int dataProductType string +dataProduct string fileName string -locusNode string +locusNode int +writer int directory string observationName string diff --git a/MAC/Deployment/data/PVSS/data/BGPAppl.dpdef b/MAC/Deployment/data/PVSS/data/BGPAppl.dpdef index 7837587885222d7df52b592180a69e645b3fa965..dedbd889e45696a764b11f176f153b3fc2989f4b 100644 --- a/MAC/Deployment/data/PVSS/data/BGPAppl.dpdef +++ b/MAC/Deployment/data/PVSS/data/BGPAppl.dpdef @@ -1,6 +1,2 @@ -ioNodeList intArr -locusNodeList stringArr +ioNodeList intArr adderList stringArr -writerList stringArr -dataProductList stringArr -dataProductTypeList stringArr diff --git a/RTCP/IONProc/src/ION_main.cc b/RTCP/IONProc/src/ION_main.cc index f7a5bd9d198c49f743be0af017486c81e27ee91a..339a197ef1d3af035956e417650538dd1c66a69a 100644 --- a/RTCP/IONProc/src/ION_main.cc +++ b/RTCP/IONProc/src/ION_main.cc @@ -33,6 +33,7 @@ #include <Interface/Stream.h> #include <Interface/Parset.h> #include <ION_Allocator.h> +#include <SSH.h> #include <Stream/SocketStream.h> #include <StreamMultiplexer.h> #include <IONProc/Package__Version.h> @@ -49,10 +50,6 @@ #include <sys/types.h> #include <sys/mman.h> -#ifdef HAVE_LIBSSH2 -#include <libssh2.h> -#endif - #include <boost/format.hpp> #if defined HAVE_MPI @@ -390,13 +387,10 @@ int main(int argc, char **argv) } #endif -#ifdef HAVE_LIBSSH2 - int rc = libssh2_init(0); - if (rc) { - std::cerr << "libssh2 init failed: " << rc << std::endl; + if (!SSH_Init()) { + std::cerr << "SSH subsystem init failed" << std::endl; exit(1); } -#endif #if defined HAVE_BGP INIT_LOGGER_WITH_SYSINFO(str(boost::format("IONProc@%02d") % myPsetNumber)); @@ -415,9 +409,7 @@ int main(int argc, char **argv) master_thread(); -#ifdef HAVE_LIBSSH2 - libssh2_exit(); -#endif + SSH_Finalize(); #if defined HAVE_MPI MPI_Finalize(); diff --git a/RTCP/IONProc/src/RSP.h b/RTCP/IONProc/src/RSP.h index 
7499850da128a3bf55d706dd0ebed06d217b94a6..adaf9e013f629cb09b4a4d20746ffc56b5d500de 100644 --- a/RTCP/IONProc/src/RSP.h +++ b/RTCP/IONProc/src/RSP.h @@ -23,6 +23,7 @@ #ifndef LOFAR_IONPROC_RSP_H #define LOFAR_IONPROC_RSP_H +#include <Common/LofarTypes.h> namespace LOFAR { namespace RTCP { @@ -34,14 +35,14 @@ namespace RTCP { struct RSP { struct Header { - uint8_t version; - uint8_t sourceInfo; - uint16_t configuration; - uint16_t station; - uint8_t nrBeamlets; - uint8_t nrBlocks; - uint32_t timestamp; - uint32_t blockSequenceNumber; + uint8 version; + uint8 sourceInfo; + uint16 configuration; + uint16 station; + uint8 nrBeamlets; + uint8 nrBlocks; + uint32 timestamp; + uint32 blockSequenceNumber; } header; char data[8130]; diff --git a/RTCP/IONProc/src/SSH.cc b/RTCP/IONProc/src/SSH.cc index dc2d734f785fcfb6adcb49870a3bd4e2bcb55e4f..be716058ec5fffa64e1bbdeb1ab950017e7c15d6 100644 --- a/RTCP/IONProc/src/SSH.cc +++ b/RTCP/IONProc/src/SSH.cc @@ -52,7 +52,7 @@ namespace LOFAR { namespace RTCP { #ifdef HAVE_LIBSSH2 - + SSHconnection::SSHconnection(const string &logPrefix, const string &hostname, const string &commandline, const string &username, const string &sshkey) : itsLogPrefix(logPrefix), @@ -323,8 +323,63 @@ void SSHconnection::commThread() LOG_INFO_STR(itsLogPrefix << "Terminated normally"); } } + +#include <openssl/crypto.h> + +std::vector< SmartPtr<Mutex> > openssl_mutexes; + +static void lock_callback(int mode, int type, const char *file, int line) +{ + (void)file; + (void)line; + + if (mode & CRYPTO_LOCK) + openssl_mutexes[type]->lock(); + else + openssl_mutexes[type]->unlock(); +} + +static unsigned long thread_id_callback() +{ + return static_cast<unsigned long>(pthread_self()); +} #endif + +bool SSH_Init() { + +#ifdef HAVE_LIBSSH2 + // initialise openssl + openssl_mutexes.resize(CRYPTO_num_locks()); + for (size_t i = 0; i < openssl_mutexes.size(); ++i) + openssl_mutexes[i] = new Mutex; + + CRYPTO_set_id_callback(&thread_id_callback); + CRYPTO_set_locking_callback(&lock_callback); + + // initialise libssh2 + int rc = libssh2_init(0); + + if (rc) + return false; +#endif + + return true; +} + +void SSH_Finalize() { +#ifdef HAVE_LIBSSH2 + // exit libssh2 + libssh2_exit(); + + // exit openssl + CRYPTO_set_locking_callback(NULL); + CRYPTO_set_id_callback(NULL); + + openssl_mutexes.clear(); +#endif +} + static void exitwitherror( const char *errorstr ) { diff --git a/RTCP/IONProc/src/SSH.h b/RTCP/IONProc/src/SSH.h index 7dda0f9d0486fc40bb5e1bd054dd228df0e0e031..226c97220cb8f398a05f98854da9f60222fddd81 100644 --- a/RTCP/IONProc/src/SSH.h +++ b/RTCP/IONProc/src/SSH.h @@ -38,6 +38,9 @@ namespace LOFAR { namespace RTCP { +bool SSH_Init(); +void SSH_Finalize(); + #ifdef HAVE_LIBSSH2 class SSHconnection { diff --git a/RTCP/IONProc/src/WallClockTime.h b/RTCP/IONProc/src/WallClockTime.h index 16e28bf729ed4ab51162115d594c9eb6b7254305..cdc4313cb6c376e37fe6305c0df5e5046ec1d151 100644 --- a/RTCP/IONProc/src/WallClockTime.h +++ b/RTCP/IONProc/src/WallClockTime.h @@ -98,7 +98,7 @@ inline void WallClockTime::cancelWait() ScopedLock scopedLock(itsMutex); itsCancelled = true; - itsCondition.signal(); + itsCondition.broadcast(); } diff --git a/RTCP/IONProc/test/CMakeLists.txt b/RTCP/IONProc/test/CMakeLists.txt index 14beaac6c093928066bee009967f3a8968dd75ea..0cc60d5c9d04f625bc12a0b35cb34b9ccde424ed 100644 --- a/RTCP/IONProc/test/CMakeLists.txt +++ b/RTCP/IONProc/test/CMakeLists.txt @@ -7,3 +7,6 @@ include_directories(${PACKAGE_SOURCE_DIR}/src) lofar_add_test(tDelayCompensation tDelayCompensation.cc) 
lofar_add_test(tSSH tSSH.cc) +#lofar_add_test(tRSPTimeStamp tRSPTimeStamp.cc) + +#add_subdirectory(newInputSection) diff --git a/RTCP/IONProc/test/RTCP.parset b/RTCP/IONProc/test/RTCP.parset index 2f6644efc96995d6a89c5674ee0d5ff051a2956b..3cfafd160556bc41b11fae9a048878847f4a2a92 100644 --- a/RTCP/IONProc/test/RTCP.parset +++ b/RTCP/IONProc/test/RTCP.parset @@ -1,7 +1,7 @@ OLAP.CNProc.integrationSteps = 768 OLAP.CNProc.phaseOnePsets = [0..4] OLAP.CNProc.phaseTwoPsets = [0..4] -OLAP.CNProc.phaseThreePsets = [] +OLAP.CNProc.phaseThreePsets = [0..4] OLAP.CNProc.phaseOneTwoCores = [0,1,2] OLAP.CNProc.phaseThreeCores = [0,1,2] OLAP.CNProc.partition = PartitionName @@ -14,24 +14,20 @@ OLAP.nrBitsPerSample = 16 OLAP.nrTimesInFrame = 16 OLAP.nrSecondsOfBuffer = 3.5 OLAP.CNProc.nrPPFTaps = 16 -OLAP.Storage.userName = romein -OLAP.Storage.sshIdentityFile = /home/romein/.ssh/id_rsa -OLAP.Storage.msWriter = /tmp/build/gnu_opt/RTCP/Storage/src/Storage_main +OLAP.Storage.userName = mol +OLAP.Storage.sshIdentityFile = /home/mol/.ssh/id_dsa +OLAP.Storage.msWriter = Storage_main OLAP.storageNodeList = [5*0] OLAP.OLAP_Conn.IONProc_Storage_Ports = [8300..9000] OLAP.OLAP_Conn.IONProc_Storage_Transport = TCP OLAP.OLAP_Conn.rawDataOutputOnly = F OLAP.storageStationNames = [CS004LBA,CS006LBA,RS205LBA,RS208LBA,RS306LBA] OLAP.tiedArrayStationNames = [] -Observation.Beam[0].nrTiedArrayBeams = 2 +Observation.Beam[0].nrTiedArrayBeams = 1 Observation.Beam[0].TiedArrayBeam[0].angle1 = 1.1 Observation.Beam[0].TiedArrayBeam[0].angle2 = 1.2 Observation.Beam[0].TiedArrayBeam[0].coherent = T Observation.Beam[0].TiedArrayBeam[0].dispersionMeasure = 0.0 -Observation.Beam[0].TiedArrayBeam[1].angle1 = 1.3 -Observation.Beam[0].TiedArrayBeam[1].angle2 = 1.4 -Observation.Beam[0].TiedArrayBeam[1].coherent = T -Observation.Beam[0].TiedArrayBeam[1].dispersionMeasure = 0.0 OLAP.IONProc.integrationSteps = 1 OLAP.CNProc_CoherentStokes.timeIntegrationFactor = 1 OLAP.CNProc_IncoherentStokes.timeIntegrationFactor = 1 @@ -39,7 +35,7 @@ OLAP.CNProc_CoherentStokes.channelsPerSubband = 256 OLAP.CNProc_IncoherentStokes.channelsPerSubband = 256 OLAP.CNProc_CoherentStokes.subbandsPerFile = 244 OLAP.CNProc_IncoherentStokes.subbandsPerFile = 244 -OLAP.CNProc_CoherentStokes.which = I # IQUV, XXYY +OLAP.CNProc_CoherentStokes.which = XXYY # I, IQUV, XXYY OLAP.CNProc_IncoherentStokes.which = I # IQUV OLAP.PencilInfo.storageNodeList = [] OLAP.delayCompensation = T @@ -80,9 +76,9 @@ OLAP.Storage.hosts = [localhost] Observation.DataProducts.Output_Correlated.enabled = T Observation.DataProducts.Output_Correlated.locations = [5*localhost:/tmp] Observation.DataProducts.Output_Correlated.filenames = [SB000.MS,SB001.MS,SB002.MS,SB003.MS,SB004.MS] -Observation.DataProducts.Output_Beamformed.enabled = F +Observation.DataProducts.Output_Beamformed.enabled = T Observation.DataProducts.Output_Beamformed.locations = [4*localhost:/tmp] -Observation.DataProducts.Output_Beamformed.filenames = [CV001.X,CV001.Y,CV002.X,CV002.Y] +Observation.DataProducts.Output_Beamformed.filenames = [CV001.Xr,CV001.Xi,CV002.Yr,CV002.Yi] Observation.DataProducts.Output_Trigger.enabled = F Observation.DataProducts.Output_Trigger.locations = [] Observation.DataProducts.Output_Trigger.filenames = [] diff --git a/RTCP/IONProc/test/newInputSection/CMakeLists.txt b/RTCP/IONProc/test/newInputSection/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..875538d004f36d4ac28e6b09cbd702a0ddf0324f --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/CMakeLists.txt @@ 
-0,0 +1 @@ +lofar_add_test(newInputSection newInputSection.cc) diff --git a/RTCP/IONProc/test/newInputSection/OMPThread.h b/RTCP/IONProc/test/newInputSection/OMPThread.h new file mode 100644 index 0000000000000000000000000000000000000000..014478942387a75afbf3552fda81c8a07de21d52 --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/OMPThread.h @@ -0,0 +1,67 @@ +#ifndef __OMPTHREAD__ +#define __OMPTHREAD__ + +#include <pthread.h> +#include <time.h> +#include <signal.h> +#include <Common/LofarLogger.h> +#include <Common/SystemCallException.h> + +namespace LOFAR { + +class OMPThread { +public: + OMPThread(): id(0), stopped(false) {} + + void start() { + id = pthread_self(); + } + + void stop() { + id = 0; + stopped = true; + } + + void kill() { + while (!stopped) { + // interrupt blocking system calls (most notably, read()) + // note that the thread will stick around until the end + // of pragma parallel, so the thread id is always valid + // once it has been set. + pthread_t oldid = id; + + if (oldid > 0) + if (pthread_kill(oldid, SIGHUP) < 0) + throw SystemCallException("pthread_kill", errno, THROW_ARGS); + + // sleep for 100ms - do NOT let us get killed here, + // because we're maintaining integrity + const struct timespec ts = { 1, 200*1000 }; + while (nanosleep( &ts, NULL ) == -1 && errno == EINTR) + ; + } + } + + class ScopedRun { + public: + ScopedRun( OMPThread &thread ): thread(thread) { + thread.start(); + } + + ~ScopedRun() { + thread.stop(); + } + + private: + OMPThread &thread; + }; + +private: + volatile pthread_t id; + volatile bool stopped; +}; + +} + +#endif + diff --git a/RTCP/IONProc/test/newInputSection/Poll.h b/RTCP/IONProc/test/newInputSection/Poll.h new file mode 100644 index 0000000000000000000000000000000000000000..b3151a7cf63c410f2c78cdb9d82e9041b8b0a669 --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/Poll.h @@ -0,0 +1,82 @@ +#ifndef POLL +#define POLL + +#include <Stream/FileDescriptorBasedStream.h> +#include <Common/SystemCallException.h> +#include <sys/epoll.h> + +class Poll: protected FileDescriptorBasedStream { +public: + Poll(); + + // Caveats: + // * Don't add a stream that's already in the set + // * You might want to call s->setnonblocking() as well, + // or your read()/write() can still block. + void add( FileDescriptorBasedStream *s, bool reading, bool writing ); + + // Note: closing the file descriptor automatically removes + // the stream from the list, see man epoll. + void remove( FileDescriptorBasedStream *s ); + + // Wait for timeout_ms milliseconds for events, and return + // the relevant streams. Up to maxevents streams are returned. + std::vector<FileDescriptorBasedStream *> poll( int timeout_ms, size_t maxevents ); +}; + +Poll::Poll() +{ + fd = epoll_create1(EPOLL_CLOEXEC); + + if( fd == -1 ) + throw SystemCallException("epoll_create1", errno, THROW_ARGS); +} + +void Poll::add( FileDescriptorBasedStream *s, bool reading, bool writing ) +{ + ASSERT( s->fd >= 0 ); + + struct epoll_event ev; + ev.events = (reading ? EPOLLIN : 0) | (writing ? 
EPOLLOUT : 0); + ev.data.ptr = s; + + if (epoll_ctl(fd, EPOLL_CTL_ADD, s->fd, &ev) == -1) + throw SystemCallException("epoll_ctl", errno, THROW_ARGS); +} + +void Poll::remove( FileDescriptorBasedStream *s ) +{ + ASSERT( s->fd >= 0 ); + + struct epoll_event ev; + + if (epoll_ctl(fd, EPOLL_CTL_DEL, s->fd, &ev) == -1) + throw SystemCallException("epoll_ctl", errno, THROW_ARGS); +} + +std::vector<FileDescriptorBasedStream *> Poll::poll( int timeout_ms, size_t maxevents ) +{ + // In theory, starvation can occur under heavy I/O if maxevents < #streams. If + // this is to be avoided, extend this class to employ a ready list as + // described in 'man epoll'. + std::vector<struct epoll_event> events(maxevents); + int nfds; + + nfds = epoll_wait(fd, &events[0], events.size(), timeout_ms ); + + if (nfds == -1) + throw SystemCallException("epoll_wait", errno, THROW_ARGS); + + std::vector<FileDescriptorBasedStream *> result(nfds, 0); + + for (int i = 0; i < nfds; ++i) { + FileDescriptorBasedStream *s = static_cast<FileDescriptorBasedStream*>(events[i].data.ptr); + + result[i] = s; + } + + return result; +} + +#endif + diff --git a/RTCP/IONProc/test/newInputSection/Ranges.h b/RTCP/IONProc/test/newInputSection/Ranges.h new file mode 100644 index 0000000000000000000000000000000000000000..48d3e669918a3d09a419b546a9ee4c9fa5166f0e --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/Ranges.h @@ -0,0 +1,217 @@ +#ifndef RANGES +#define RANGES + +#include <Interface/SparseSet.h> +#include <Common/LofarLogger.h> +#include <Common/LofarTypes.h> + +#include <ostream> + +namespace LOFAR { +namespace RTCP { + +// +// Thread-safe set of int64 [from,to) ranges. +// +class Ranges { +public: + Ranges(); + Ranges( void *data, size_t numBytes, int64 minHistory, bool create ); + ~Ranges(); + + // remove [0,to) + void excludeBefore( int64 to ); + + // add a range [from,to), and return whether the addition + // was successful. + bool include( int64 from, int64 to ); + + // returns whether there is anything set in [first, last) + bool anythingBetween( int64 first, int64 last ) const; + + SparseSet<int64> sparseSet( int64 first, int64 last ) const; + +private: + struct Range { + // Write 'from' before 'to' to allow the following invariant: + // + // from < to : a valid range + // from >= to : invalid range (being written) + // from = to = 0: an unused range + volatile int64 from, to; + + Range(): from(0), to(0) {} + }; + + size_t len; + Range * ranges; + Range * begin; + Range * end; + Range *head; + + // minimal history to maintain (samples newer than this + // will be maintained in favour of newly added ranges) + int64 minHistory; + +public: + static size_t size(size_t numElements) { + return numElements * sizeof(struct Range); + } + + friend std::ostream& operator<<( std::ostream &str, const Ranges &r ); +}; + +std::ostream& operator<<( std::ostream &str, const Ranges &r ) +{ + + for (struct Ranges::Range *i = r.begin; i != r.end; ++i) + if (i->to != 0) + str << "[" << i->from << ", " << i->to << ") "; + + return str; +} + +Ranges::Ranges() +: + len(0), + ranges(0), + begin(0), + end(begin), + head(begin), + minHistory(0) +{ +} + +Ranges::Ranges( void *data, size_t numBytes, int64 minHistory, bool create ) +: + len(numBytes / sizeof *ranges), + ranges(create ?
new(data)Range[len] : static_cast<Range*>(data)), + begin(&ranges[0]), + end(&ranges[len]), + head(begin), + minHistory(minHistory) +{ + ASSERT( len > 0 ); +} + +Ranges::~Ranges() +{ + for (struct Range *i = begin; i != end; ++i) + i->~Range(); +} + +void Ranges::excludeBefore( int64 to ) +{ + for (struct Range *i = begin; i != end; ++i) { + if (i->to <= to) { + // erase; delete 'to' first! + i->to = 0; + i->from = 0; + continue; + } + + if (i->from > to) { + // shorten + i->from = to; + } + } +} + +bool Ranges::include( int64 from, int64 to ) +{ + ASSERTSTR( from < to, from << " < " << to ); + ASSERTSTR( from >= head->to, from << " >= " << head->to ); + + if (head->to == 0) { + // *head is unused + head->from = from; + head->to = to; + return true; + } + + if (head->to == from) { + // *head can be extended + head->to = to; + return true; + } + + // new range is needed + struct Range * const next = head + 1 == end ? begin : head + 1; + + if (next->to < to - minHistory) { + // range at 'next' is old enough to toss away + next->from = from; + next->to = to; + + head = next; + return true; + } + + // no room -- discard + return false; +} + +bool Ranges::anythingBetween( int64 first, int64 last ) const +{ + for(struct Range *i = begin; i != end; ++i) { + // read in same order as writes occur + int64 from = i->from; + int64 to = i->to; + + if (to == 0) { + // unused + continue; + } + + if (from >= to) { + // read/write conflict + continue; + } + + from = std::max( from, first ); + to = std::min( to, last ); + + if (from < to) + return true; + } + + return false; +} + +SparseSet<int64> Ranges::sparseSet( int64 first, int64 last ) const +{ + SparseSet<int64> result; + + if (first >= last) + return result; + + for(struct Range *i = begin; i != end; ++i) { + // read in same order as writes occur + int64 from = i->from; + int64 to = i->to; + + if (to == 0) { + // unused + continue; + } + + if (from >= to) { + // read/write conflict + continue; + } + + from = std::max( from, first ); + to = std::min( to, last ); + + if (from < to) + result.include(from, to); + } + + return result; +} + +} +} + +#endif + diff --git a/RTCP/IONProc/test/newInputSection/SampleBuffer.h b/RTCP/IONProc/test/newInputSection/SampleBuffer.h new file mode 100644 index 0000000000000000000000000000000000000000..73ca26e778227358ebf80e5902278f7d29cad3de --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/SampleBuffer.h @@ -0,0 +1,99 @@ +#ifndef __SAMPLEBUFFER__ +#define __SAMPLEBUFFER__ + +#include <Common/LofarLogger.h> +#include <Common/LofarConstants.h> +#include <Interface/MultiDimArray.h> +#include <Interface/Allocator.h> +#include "StationSettings.h" +#include "SharedMemory.h" +#include "Ranges.h" +#include <string> +#include <complex> + +namespace LOFAR { +namespace RTCP { + +template<typename T> class SampleBuffer { +public: + SampleBuffer( const struct StationSettings &settings, bool create ); + + struct SampleType { + std::complex<T> x; + std::complex<T> y; + }; + +private: + const std::string logPrefix; + SharedMemoryArena data; + SparseSetAllocator allocator; + + struct StationSettings *initSettings( const struct StationSettings &localSettings, bool create ); + + static size_t dataSize( const struct StationSettings &settings ) { + return sizeof settings + + NR_RSPBOARDS * (Ranges::size(settings.nrFlagRanges) + 8) + + settings.nrBeamlets * (settings.nrSamples * sizeof(T) + 128); + } + +public: + struct StationSettings *settings; + + const size_t nrBeamlets; + const size_t nrSamples; + const size_t nrFlagRanges; + + 
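As an aside on the Ranges class added above: a minimal usage sketch of the writer/reader protocol its flags follow — exclude what is about to be overwritten, write the samples, then include the new range. Sizes, timestamps and the heap-backed storage are illustrative; in the real buffer the storage lives in shared memory.

    #include <vector>
    #include <Interface/SparseSet.h>
    #include "Ranges.h"

    using namespace LOFAR;
    using namespace LOFAR::RTCP;

    void ranges_example()
    {
      // backing storage for 16 Range entries
      std::vector<char> storage(Ranges::size(16));
      Ranges flags(&storage[0], storage.size(), 1024, true);

      // writer: about to overwrite samples [8192, 8208) in a 4096-sample circular buffer
      flags.excludeBefore(8208 - 4096);   // invalidate whatever those slots held before
      // ... copy the samples into the buffer here ...
      flags.include(8192, 8208);          // publish them only after the copy is done

      // reader: which samples in [8000, 8300) are valid?
      if (flags.anythingBetween(8000, 8300)) {
        SparseSet<int64> valid = flags.sparseSet(8000, 8300);
        (void)valid;                      // 'valid' holds the ranges that are safe to read
      }
    }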
MultiDimArray<T,2> beamlets; // [subband][sample] + std::vector<Ranges> flags; // [rspboard] +}; + + +template<typename T> SampleBuffer<T>::SampleBuffer( const struct StationSettings &_settings, bool create ) +: + logPrefix(str(boost::format("[station %s %s board] [SampleBuffer] ") % _settings.station.stationName % _settings.station.antennaSet)), + data(_settings.dataKey, dataSize(_settings), create ? SharedMemoryArena::CREATE_EXCL : SharedMemoryArena::READ), + allocator(data), + settings(initSettings(_settings, create)), + + nrBeamlets(settings->nrBeamlets), + nrSamples(settings->nrSamples), + nrFlagRanges(settings->nrFlagRanges), + + beamlets(boost::extents[nrBeamlets][nrSamples], 128, allocator, false, false), + flags(settings->nrBoards) +{ + // bitmode must coincide with our template + ASSERT( sizeof(T) == N_POL * 2 * settings->station.bitmode / 8 ); + + for (size_t f = 0; f < flags.size(); f++) { + size_t numBytes = Ranges::size(nrFlagRanges); + + flags[f] = Ranges(static_cast<int64*>(allocator.allocate(numBytes, 8)), numBytes, nrSamples, create); + } + + LOG_INFO_STR( logPrefix << "Initialised" ); +} + +template<typename T> struct StationSettings *SampleBuffer<T>::initSettings( const struct StationSettings &localSettings, bool create ) +{ + //struct StationSettings *sharedSettings = allocator.allocateTyped<struct StationSettings>(); + struct StationSettings *sharedSettings = allocator.allocateTyped(); + + if (create) { + // register settings + LOG_INFO_STR( logPrefix << "Registering " << localSettings.station ); + *sharedSettings = localSettings; + } else { + // verify settings + ASSERT( *sharedSettings == localSettings ); + LOG_INFO_STR( logPrefix << "Connected to " << localSettings.station ); + } + + return sharedSettings; +} + +} +} + +#endif + diff --git a/RTCP/IONProc/test/newInputSection/SharedMemory.h b/RTCP/IONProc/test/newInputSection/SharedMemory.h new file mode 100644 index 0000000000000000000000000000000000000000..4de23a0a141b7d740a468010995e8c3a665552fd --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/SharedMemory.h @@ -0,0 +1,149 @@ +#ifndef SHARED_MEMORY +#define SHARED_MEMORY + +#include <Common/Exception.h> +#include <Common/SystemCallException.h> +#include <Common/LofarLogger.h> +#include <Interface/Allocator.h> + +#include <sys/ipc.h> +#include <sys/shm.h> +#include <sys/stat.h> +#include <unistd.h> +#include <time.h> + +namespace LOFAR { +namespace RTCP { + +class SharedMemoryArena: public FixedArena { +public: + EXCEPTION_CLASS(TimeOutException, LOFAR::Exception); + + enum Mode { + CREATE, + CREATE_EXCL, + READ, + READWRITE + }; + + SharedMemoryArena( key_t key, size_t size, Mode mode = CREATE, time_t timeout = 60 ); + ~SharedMemoryArena(); + + template <typename T> T* ptr( size_t offset = 0 ) const { + return reinterpret_cast<T*>(reinterpret_cast<char*>(itsBegin) + offset); + } + +private: + const key_t key; + const Mode mode; + int shmid; +}; + +template<typename T> class SharedStruct { +public: + SharedStruct( key_t key, bool create = false, time_t timeout = 60 ); + + T &get() { + return *data.ptr<T>(); + } + + T &get() const { + return *data.ptr<T>(); + } + +private: + SharedMemoryArena data; + + SharedStruct( const SharedStruct & ); + SharedStruct &operator=( const SharedStruct & ); +}; + +SharedMemoryArena::SharedMemoryArena( key_t key, size_t size, Mode mode, time_t timeout ) +: + FixedArena(NULL, size), + key(key), + mode(mode), + shmid(-1) +{ + time_t deadline = time(0) + timeout; + int open_flags = 0, attach_flags = 0; + + switch (mode) { + case 
CREATE_EXCL: + open_flags |= IPC_EXCL; + case CREATE: + open_flags |= IPC_CREAT | SHM_NORESERVE | S_IRUSR | S_IWUSR; + break; + + case READ: + attach_flags |= SHM_RDONLY; + break; + + case READWRITE: + default: + break; + } + + // get/create shmid handle + for(;;) { + shmid = shmget( key, itsSize, open_flags ); + + if (shmid == -1) { + if (!timeout) + throw SystemCallException("shmget", errno, THROW_ARGS); + + if (errno != ENOENT && errno != EEXIST) + throw SystemCallException("shmget", errno, THROW_ARGS); + } else { + // attach to segment + itsBegin = shmat( shmid, NULL, attach_flags ); + + if (itsBegin != (void*)-1) + break; // success! + + if (!timeout) + throw SystemCallException("shmat", errno, THROW_ARGS); + + if (errno != EINVAL) + throw SystemCallException("shmat", errno, THROW_ARGS); + } + + // try again until the deadline + + if (time(0) >= deadline) + throw TimeOutException("shared memory", THROW_ARGS); + + if (usleep(999999) < 0) + throw SystemCallException("sleep", errno, THROW_ARGS); + } + +} + +SharedMemoryArena::~SharedMemoryArena() +{ + try { + // detach + if (shmdt(itsBegin) < 0) + throw SystemCallException("shmdt", errno, THROW_ARGS); + + // destroy + if (mode == CREATE || mode == CREATE_EXCL) + if (shmctl(shmid, IPC_RMID, NULL) < 0) + throw SystemCallException("shmctl", errno, THROW_ARGS); + + } catch (Exception &ex) { + LOG_ERROR_STR("Exception in destructor: " << ex); + } +} + +template<typename T> SharedStruct<T>::SharedStruct( key_t key, bool create, time_t timeout ) +: + data(key, sizeof(T), create ? SharedMemoryArena::CREATE : SharedMemoryArena::READWRITE, timeout) +{ +} + +} +} + +#endif + diff --git a/RTCP/IONProc/test/newInputSection/StationData.h b/RTCP/IONProc/test/newInputSection/StationData.h new file mode 100644 index 0000000000000000000000000000000000000000..1dd93434314ba4e715aa4a32a3563e4efa1d5c23 --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/StationData.h @@ -0,0 +1,390 @@ +#ifndef __STATIONDATA__ +#define __STATIONDATA__ + +#include <Common/LofarLogger.h> +#include <Stream/Stream.h> +#include <Interface/RSPTimeStamp.h> +#include <Interface/SmartPtr.h> +#include <RSP.h> +#include <WallClockTime.h> +#include "SampleBuffer.h" +#include "Ranges.h" +#include "StationSettings.h" +#include <string> +#include <cstring> +#include <omp.h> + +namespace LOFAR { +namespace RTCP { + +template<typename T> class RSPBoard { +public: + RSPBoard( Stream &inputStream, SampleBuffer<T> &buffer, unsigned boardNr, const struct StationSettings &settings ); + + const unsigned nr; + + bool readPacket(); + void writePacket(); + + static size_t packetSize( struct RSP &packet ) { + return sizeof(struct RSP::Header) + packet.header.nrBeamlets * packet.header.nrBlocks * sizeof(T); + } + +private: + const std::string logPrefix; + + Stream &inputStream; + const bool supportPartialReads; + struct RSP packet; + TimeStamp last_timestamp; + TimeStamp last_logtimestamp; + + SampleBuffer<T> &buffer; + Ranges &flags; + const struct StationSettings settings; + const size_t firstBeamlet; + + size_t nrReceived, nrBadSize, nrBadTime, nrOutOfOrder; + + void logStatistics(); +}; + +template<typename T> RSPBoard<T>::RSPBoard( Stream &inputStream, SampleBuffer<T> &buffer, unsigned boardNr, const struct StationSettings &settings ) +: + nr(boardNr), + logPrefix(str(boost::format("[station %s %s board %u] [RSPBoard] ") % settings.station.stationName % settings.station.antennaSet % nr)), + inputStream(inputStream), + supportPartialReads(dynamic_cast<SocketStream *>(&inputStream) == 0 || 
dynamic_cast<SocketStream &>(inputStream).protocol != SocketStream::UDP), + + buffer(buffer), + flags(buffer.flags[boardNr]), + settings(settings), + firstBeamlet(settings.nrBeamlets / settings.nrBoards * boardNr), + + nrReceived(0), + nrBadSize(0), + nrBadTime(0), + nrOutOfOrder(0) +{ +} + +template<typename T> void RSPBoard<T>::writePacket() +{ + const uint8 &nrBeamlets = packet.header.nrBeamlets; + const uint8 &nrTimeslots = packet.header.nrBlocks; + + // the timestamp is of the last read packet by definition + const TimeStamp ×tamp = last_timestamp; + + const size_t from_offset = (int64)timestamp % settings.nrSamples; + size_t to_offset = ((int64)timestamp + nrTimeslots) % settings.nrSamples; + + if (to_offset == 0) + to_offset = settings.nrSamples; + + const size_t wrap = from_offset < to_offset ? 0 : settings.nrSamples - from_offset; + + const T *beamlets = reinterpret_cast<const T*>(&packet.data); + + ASSERT( nrBeamlets <= settings.nrBeamlets / settings.nrBoards ); + + // mark data we overwrite as invalid + flags.excludeBefore(timestamp + nrTimeslots - settings.nrSamples); + + // transpose + for (uint8 b = 0; b < nrBeamlets; ++b) { + T *dst1 = &buffer.beamlets[firstBeamlet + b][from_offset]; + + if (wrap > 0) { + T *dst2 = &buffer.beamlets[firstBeamlet + b][0]; + + memcpy(dst1, beamlets, wrap * sizeof(T)); + memcpy(dst2, beamlets, to_offset * sizeof(T)); + } else { + memcpy(dst1, beamlets, nrTimeslots * sizeof(T)); + } + + beamlets += nrTimeslots; + } + + // mark as valid + flags.include(timestamp, timestamp + nrTimeslots); +} + +template<typename T> bool RSPBoard<T>::readPacket() +{ + if (supportPartialReads) { + // read header first + inputStream.read(&packet, sizeof(struct RSP::Header)); + + // read rest of packet + inputStream.read(&packet.data, packetSize(packet) - sizeof(struct RSP::Header)); + + ++nrReceived; + } else { + // read full packet at once -- numbytes will tell us how much we've actually read + size_t numbytes = inputStream.tryRead(&packet, sizeof packet); + + ++nrReceived; + + if( numbytes < sizeof(struct RSP::Header) + || numbytes != packetSize(packet) ) { + LOG_WARN_STR( logPrefix << "Packet is " << numbytes << " bytes, but should be " << packetSize(packet) << " bytes" ); + + ++nrBadSize; + return false; + } + } + + // check sanity of packet + + // detect bad timestamp + if (packet.header.timestamp == ~0U) { + ++nrBadTime; + return false; + } + + const TimeStamp timestamp(packet.header.timestamp, packet.header.blockSequenceNumber, settings.station.clock); + + // detect out-of-order data + if (timestamp < last_timestamp) { + ++nrOutOfOrder; + return false; + } + + // don't accept big jumps (>10s) in timestamp + const int64 oneSecond = settings.station.clock / 1024; + + if (last_timestamp && packet.header.timestamp > last_timestamp + 10 * oneSecond) { + ++nrBadTime; + return false; + } + + // packet was read and is sane + + last_timestamp = timestamp; + + if (timestamp > last_logtimestamp + oneSecond) { + logStatistics(); + + last_logtimestamp = timestamp; + } + + return true; +} + + +template<typename T> void RSPBoard<T>::logStatistics() +{ + LOG_INFO_STR( logPrefix << "Received " << nrReceived << " packets: " << nrOutOfOrder << " out of order, " << nrBadTime << " bad timestamps, " << nrBadSize << " bad sizes" ); + + nrReceived = 0; + nrOutOfOrder = 0; + nrBadTime = 0; + nrBadSize = 0; +} + + +class StationStreams { +public: + StationStreams( const std::string &logPrefix, const StationSettings &settings, const std::vector<std::string> &streamDescriptors ); + + 
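As an aside on RSPBoard<T>::writePacket() above: a standalone sketch (illustrative names, one byte per sample for simplicity) of the circular-buffer arithmetic it relies on. Samples live at index timestamp % nrSamples, so a packet that crosses the end of the buffer is split into two copies; note that the second copy continues at packet + wrap.

    #include <cstring>

    // buffer: circular buffer of nrSamples samples
    // packet: n consecutive samples, the first of which has sample index 'timestamp'
    void write_wrapped(char *buffer, size_t nrSamples,
                       const char *packet, size_t n, long long timestamp)
    {
      size_t from = (size_t)(timestamp % (long long)nrSamples);
      size_t to   = (size_t)((timestamp + (long long)n) % (long long)nrSamples);

      if (to == 0)
        to = nrSamples;

      // wrap > 0 if the packet runs past the end of the buffer
      size_t wrap = from < to ? 0 : nrSamples - from;

      if (wrap > 0) {
        std::memcpy(buffer + from, packet,        wrap);  // tail of the buffer
        std::memcpy(buffer,        packet + wrap, to);    // remainder wraps to the start
      } else {
        std::memcpy(buffer + from, packet, n);
      }
    }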
void process(); + + void stop(); + +protected: + const std::string logPrefix; + const StationSettings settings; + const std::vector<std::string> streamDescriptors; + const size_t nrBoards; + + WallClockTime waiter; + + virtual void processBoard( size_t nr ) = 0; +}; + +StationStreams::StationStreams( const std::string &logPrefix, const StationSettings &settings, const std::vector<std::string> &streamDescriptors ) +: + logPrefix(logPrefix), + settings(settings), + streamDescriptors(streamDescriptors), + nrBoards(streamDescriptors.size()) +{ +} + +void StationStreams::process() +{ + std::vector<OMPThread> threads(nrBoards); + + ASSERT(nrBoards > 0); + + LOG_INFO_STR( logPrefix << "Start" ); + + #pragma omp parallel sections num_threads(2) + { + #pragma omp section + { + // start all boards + LOG_INFO_STR( logPrefix << "Starting all boards" ); + #pragma omp parallel for num_threads(nrBoards) + for (size_t i = 0; i < nrBoards; ++i) { + OMPThread::ScopedRun sr(threads[i]); + + processBoard(i); + } + } + + #pragma omp section + { + // wait until we have to stop + LOG_INFO_STR( logPrefix << "Waiting for stop signal" ); + waiter.waitForever(); + + // kill all boards + LOG_INFO_STR( logPrefix << "Stopping all boards" ); + #pragma omp parallel for num_threads(nrBoards) + for (size_t i = 0; i < nrBoards; ++i) + threads[i].kill(); + } + } + + LOG_INFO_STR( logPrefix << "End" ); +} + +void StationStreams::stop() +{ + waiter.cancelWait(); +} + + +template<typename T> class Station: public StationStreams { +public: + Station( const StationSettings &settings, const std::vector<std::string> &streamDescriptors ); + +protected: + SampleBuffer<T> buffer; + + virtual void processBoard( size_t nr ); +}; + +template<typename T> Station<T>::Station( const StationSettings &settings, const std::vector<std::string> &streamDescriptors ) +: + StationStreams(str(boost::format("[station %s %s] [Station] ") % settings.station.stationName % settings.station.antennaSet), settings, streamDescriptors), + + buffer(settings, true) +{ + LOG_INFO_STR( logPrefix << "Initialised" ); +} + +template<typename T> void Station<T>::processBoard( size_t nr ) +{ + const std::string logPrefix(str(boost::format("[station %s %s board %u] [Station] ") % settings.station.stationName % settings.station.antennaSet % nr)); + + try { + LOG_INFO_STR( logPrefix << "Connecting to " << streamDescriptors[nr] ); + SmartPtr<Stream> s = createStream(streamDescriptors[nr], true); + + LOG_INFO_STR( logPrefix << "Connecting to shared memory buffer 0x" << std::hex << settings.dataKey ); + RSPBoard<T> board(*s, buffer, nr, settings); + + LOG_INFO_STR( logPrefix << "Start" ); + + for(;;) + if (board.readPacket()) + board.writePacket(); + + } catch (Stream::EndOfStreamException &ex) { + LOG_INFO_STR( logPrefix << "End of stream"); + } catch (SystemCallException &ex) { + if (ex.error == EINTR) + LOG_INFO_STR( logPrefix << "Aborted: " << ex.what()); + else + LOG_ERROR_STR( logPrefix << "Caught Exception: " << ex); + } catch (Exception &ex) { + LOG_ERROR_STR( logPrefix << "Caught Exception: " << ex); + } + + LOG_INFO_STR( logPrefix << "End"); +} + + +template<typename T> class Generator: public StationStreams { +public: + Generator( const StationSettings &settings, const std::vector<std::string> &streamDescriptors ); + +protected: + void processBoard( size_t nr ); + + virtual void makePacket( struct RSP &header, const TimeStamp ×tamp ); +}; + +template<typename T> Generator<T>::Generator( const StationSettings &settings, const std::vector<std::string> 
&streamDescriptors ) +: + StationStreams(str(boost::format("[station %s %s] [Generator] ") % settings.station.stationName % settings.station.antennaSet), settings, streamDescriptors) +{ + LOG_INFO_STR( logPrefix << "Initialised" ); +} + +template<typename T> void Generator<T>::makePacket( struct RSP &packet, const TimeStamp ×tamp ) +{ + packet.header.nrBeamlets = settings.nrBeamlets / settings.nrBoards; + packet.header.nrBlocks = 16; + + packet.header.timestamp = timestamp.getSeqId(); + packet.header.blockSequenceNumber = timestamp.getBlockId(); + + int64 data = timestamp; + + memset(packet.data, data & 0xFF, sizeof packet.data); +} + +template<typename T> void Generator<T>::processBoard( size_t nr ) +{ + const std::string logPrefix(str(boost::format("[station %s %s board %u] [Generator] ") % settings.station.stationName % settings.station.antennaSet % nr)); + + try { + LOG_INFO_STR( logPrefix << "Connecting to " << streamDescriptors[nr] ); + SmartPtr<Stream> s = createStream(streamDescriptors[nr], false); + + LOG_INFO_STR( logPrefix << "Start" ); + + TimeStamp current(time(0L) + 1, 0, settings.station.clock); + for(;;) { + struct RSP packet; + + makePacket( packet, current ); + + ASSERT(RSPBoard<T>::packetSize(packet) <= sizeof packet); + + if (!waiter.waitUntil(current)) + break; + + try { + s->write(&packet, RSPBoard<T>::packetSize(packet)); + } catch (SystemCallException &ex) { + // UDP can return ECONNREFUSED or EINVAL if server does not have its port open + if (ex.error != ECONNREFUSED && ex.error != EINVAL) + throw; + } + + current += packet.header.nrBlocks; + } + } catch (Stream::EndOfStreamException &ex) { + LOG_INFO_STR( logPrefix << "End of stream"); + } catch (SystemCallException &ex) { + if (ex.error == EINTR) + LOG_INFO_STR( logPrefix << "Aborted: " << ex.what()); + else + LOG_ERROR_STR( logPrefix << "Caught Exception: " << ex); + } catch (Exception &ex) { + LOG_ERROR_STR( logPrefix << "Caught Exception: " << ex); + } + + LOG_INFO_STR( logPrefix << "End"); +} + +} +} + +#endif diff --git a/RTCP/IONProc/test/newInputSection/StationID.h b/RTCP/IONProc/test/newInputSection/StationID.h new file mode 100644 index 0000000000000000000000000000000000000000..d4fcafed0e307200581ac752f2aec199ef56c9cb --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/StationID.h @@ -0,0 +1,82 @@ +#ifndef __STATIONID__ +#define __STATIONID__ + +#include <Common/LofarLogger.h> +#include <ostream> +#include <cstdio> +#include <string> + +namespace LOFAR { +namespace RTCP { + +struct StationID { + char stationName[64]; + char antennaSet[64]; + + unsigned clock; + unsigned bitmode; + + StationID( const std::string &stationName = "", const std::string &antennaSet = "", unsigned clock = 200 * 1000 * 1000, unsigned bitmode = 16) + : + clock(clock), + bitmode(bitmode) + { + snprintf(this->stationName, sizeof this->stationName, "%s", stationName.c_str()); + snprintf(this->antennaSet, sizeof this->antennaSet, "%s", antennaSet.c_str()); + } + + bool operator==(const struct StationID &other) const { + return !strncmp(stationName, other.stationName, sizeof stationName) + && !strncmp(antennaSet, other.antennaSet, sizeof antennaSet) + && clock == other.clock + && bitmode == other.bitmode; + } + + bool operator!=(const struct StationID &other) const { + return !(*this == other); + } + + uint32 hash() const { + // convert to 32 bit value (human-readable in hexadecimal) + uint32 stationNr = 0; + + const std::string stationNameStr(stationName); + const std::string antennaSetStr(antennaSet); + + 
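As an aside on the OMPThread/ScopedRun pattern used by StationStreams::process() above: a minimal sketch (hypothetical program) of how a blocking reader gets interrupted. In this sketch a no-op SIGHUP handler is installed without SA_RESTART, so the signal interrupts the blocking read() instead of terminating the process or silently restarting the call.

    #include <unistd.h>
    #include <signal.h>
    #include <omp.h>
    #include "OMPThread.h"

    using LOFAR::OMPThread;

    static void noop(int) {}

    int main()
    {
      // let SIGHUP interrupt blocking system calls instead of terminating us
      struct sigaction sa;
      sa.sa_handler = noop;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = 0;                       // deliberately no SA_RESTART
      sigaction(SIGHUP, &sa, NULL);

      OMPThread worker;

      #pragma omp parallel sections num_threads(2)
      {
        #pragma omp section
        {
          OMPThread::ScopedRun sr(worker);   // publish this thread's id

          char buf[1024];
          while (read(0, buf, sizeof buf) > 0)  // blocks until data, EOF, or EINTR
            ;
        }

        #pragma omp section
        {
          sleep(2);
          worker.kill();   // keeps sending SIGHUP until the worker has stopped
        }
      }

      return 0;
    }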
for(std::string::const_iterator c = stationNameStr.begin(); c != stationNameStr.end(); ++c) + if(*c >= '0' && *c <= '9') + stationNr = stationNr * 16 + (*c - '0'); + + uint32 antennaSetNr = 0; + + if (antennaSetStr == "HBA_ONE" || antennaSetStr == "HBA1" ) + antennaSetNr = 1; + else + antennaSetNr = 0; + + ASSERT( stationNr < (1L << 16) ); + ASSERT( antennaSetNr < (1L << 4) ); + + ASSERT( clock/1000000 == 200 || clock/1000000 == 160 ); + ASSERT( bitmode == 4 || bitmode == 8 || bitmode == 16 ); + + unsigned clockNr = clock/1000000 == 200 ? 0x20 : 0x16; + unsigned bitmodeNr = bitmode == 16 ? 0xF : bitmode; + + return (stationNr << 16) + (antennaSetNr << 12) + (clockNr << 4) + bitmodeNr; + } + +}; + +std::ostream& operator<<( std::ostream &str, const struct StationID &s ) { + str << "station " << s.stationName << " antennaset " << s.antennaSet << " clock " << s.clock/1000000 << " bitmode " << s.bitmode; + + return str; +} + +} +} + + +#endif + diff --git a/RTCP/IONProc/test/newInputSection/StationSettings.h b/RTCP/IONProc/test/newInputSection/StationSettings.h new file mode 100644 index 0000000000000000000000000000000000000000..171db9b02c906bd5b5c798961cbfdb7b6135c8dd --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/StationSettings.h @@ -0,0 +1,79 @@ +#ifndef __STATIONSETTINGS__ +#define __STATIONSETTINGS__ + +#include <Common/LofarLogger.h> +#include "StationID.h" +#include <ostream> + +namespace LOFAR { +namespace RTCP { + +#define NR_RSPBOARDS 4 + +struct StationSettings { +private: + static const unsigned currentVersion = 1; + + unsigned version; + + bool valid() const { return version == currentVersion; } + +public: + struct StationID station; + + unsigned nrBeamlets; + + size_t nrSamples; + + unsigned nrBoards; + size_t nrFlagRanges; + + key_t dataKey; + + StationSettings(); + + // read settings from shared memory, using the given stationID + StationSettings(struct StationID station); + + bool operator==(const struct StationSettings &other) const { + return station == other.station + && nrBeamlets == other.nrBeamlets + && nrSamples == other.nrSamples + && nrBoards == other.nrBoards + && nrFlagRanges == other.nrFlagRanges + && dataKey == other.dataKey; + } + +}; + +StationSettings::StationSettings() +: + version(currentVersion) +{ +} + +StationSettings::StationSettings(struct StationID station) +: + version(currentVersion), + station(station) +{ + do { + SharedStruct<struct StationSettings> shm(station.hash(), false); + + *this = shm.get(); + } while (!valid()); + + ASSERT( valid() ); +} + +std::ostream& operator<<( std::ostream &str, const struct StationSettings &s ) { + str << s.station << " beamlets: " << s.nrBeamlets << " buffer: " << (1.0 * s.nrSamples / s.station.clock * 1024) << "s"; + + return str; +} + +} +} + +#endif + diff --git a/RTCP/IONProc/test/newInputSection/TimeSync.h b/RTCP/IONProc/test/newInputSection/TimeSync.h new file mode 100644 index 0000000000000000000000000000000000000000..61f80f98cef5da81b2d85b2703ccc446d27643cf --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/TimeSync.h @@ -0,0 +1,89 @@ +#ifndef TIMESYNC +#define TIMESYNC + +#include <Common/Thread/Mutex.h> +#include <Common/Thread/Condition.h> + +namespace LOFAR { + +class TimeSync { +public: + TimeSync(); + + // set to `val' + void set( int64 val ); + + // wait for the value to be at least `val' + + // wait for the value to be at least `val' (and + // return true), or until there is no more data + // or a timeout (return false). 
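A worked example of the StationID::hash() packing shown above (values illustrative): for a station named "CS004LBA" with antenna set "LBA", a 200 MHz clock and 16-bit mode, the digits '0','0','4' give stationNr 0x004, the antenna set maps to 0, the clock to 0x20 and the bit mode to 0xF, so the key reads as hexadecimal 0x0004020F.

    #include <cassert>
    #include "StationID.h"

    int main()
    {
      LOFAR::RTCP::StationID id("CS004LBA", "LBA", 200 * 1000 * 1000, 16);

      // (0x004 << 16) | (0x0 << 12) | (0x20 << 4) | 0xF
      assert(id.hash() == 0x0004020FU);

      return 0;
    }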
+ bool wait( int64 val ); + bool wait( int64 val, struct timespec &timeout ); + + // signal no more data + void noMoreData(); + +private: + bool stop; + int64 timestamp; + int64 waitFor; + + Mutex mutex; + Condition cond; +}; + +TimeSync::TimeSync() +: + stop(false), + timestamp(0), + waitFor(0) +{ +} + +void TimeSync::set( int64 val ) { + ScopedLock sl(mutex); + + timestamp = val; + + if (waitFor != 0 && timestamp > waitFor) + cond.signal(); +} + +bool TimeSync::wait( int64 val ) { + ScopedLock sl(mutex); + + waitFor = val; + + while (timestamp <= val && !stop) + cond.wait(mutex); + + waitFor = 0; + + return timestamp <= val; +} + +bool TimeSync::wait( int64 val, struct timespec &timeout ) { + ScopedLock sl(mutex); + + waitFor = val; + + while (timestamp <= val && !stop) + if( !cond.wait(mutex, timeout) ) + break; + + waitFor = 0; + + return timestamp <= val; +} + +void TimeSync::noMoreData() { + ScopedLock sl(mutex); + + stop = true; + cond.signal(); +} + +} + +#endif diff --git a/RTCP/IONProc/test/newInputSection/foo.cc b/RTCP/IONProc/test/newInputSection/foo.cc new file mode 100644 index 0000000000000000000000000000000000000000..3c814527f1069141787cc319c74a2cf31dbf4de0 --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/foo.cc @@ -0,0 +1,457 @@ +#include <lofar_config.h> +#include <Common/LofarLogger.h> +#include <Common/LofarConstants.h> +#include <Stream/Stream.h> +#include <Stream/SocketStream.h> +#include <RSP.h> +#include <Interface/RSPTimeStamp.h> +#include <Interface/MultiDimArray.h> +#include <Interface/SmartPtr.h> +#include <Interface/Stream.h> +#include <WallClockTime.h> +#include "SharedMemory.h" +#include "Ranges.h" +#include "OMPThread.h" +#include "mpi.h" + +#include <vector> +#include <omp.h> +#include <map> +#include <string> +#include <ostream> +#include <cstring> +#include <boost/format.hpp> + +#define NR_RSPBOARDS 4 + +using namespace LOFAR; +using namespace RTCP; + +template<typename T> struct SampleType { + std::complex<T> x; + std::complex<T> y; +}; + +struct StationID { + char stationName[64]; + char antennaSet[64]; + + unsigned clock; + unsigned bitmode; + + StationID( const std::string &stationName = "", const std::string &antennaSet = "", unsigned clock = 200 * 1000000, unsigned bitmode = 16) + : + clock(clock), + bitmode(bitmode) + { + snprintf(this->stationName, sizeof this->stationName, "%s", stationName.c_str()); + snprintf(this->antennaSet, sizeof this->antennaSet, "%s", antennaSet.c_str()); + } + + bool operator==(const struct StationID &other) const { + return !strncmp(stationName, other.stationName, sizeof stationName) + && !strncmp(antennaSet, other.antennaSet, sizeof antennaSet) + && clock == other.clock + && bitmode == other.bitmode; + } + + bool operator!=(const struct StationID &other) const { + return !(*this == other); + } + + uint32 hash() const { + // convert to 32 bit value (human-readable in hexadecimal) + uint32 stationNr = 0; + + const std::string stationNameStr(stationName); + const std::string antennaSetStr(antennaSet); + + for(std::string::const_iterator c = stationNameStr.begin(); c != stationNameStr.end(); ++c) + if(*c >= '0' && *c <= '9') + stationNr = stationNr * 16 + (*c - '0'); + + uint32 antennaSetNr = 0; + + if (antennaSetStr == "HBA_ONE" || antennaSetStr == "HBA1" ) + antennaSetNr = 1; + else + antennaSetNr = 0; + + ASSERT( stationNr < (1L << 16) ); + ASSERT( antennaSetNr < (1L << 4) ); + + ASSERT( clock/1000000 == 200 || clock/1000000 == 160 ); + ASSERT( bitmode == 4 || bitmode == 8 || bitmode == 16 ); + + unsigned clockNr = 
clock/1000000 == 200 ? 0x20 : 0x16; + unsigned bitmodeNr = bitmode == 16 ? 0xF : bitmode; + + return (stationNr << 16) + (antennaSetNr << 12) + (clockNr << 4) + bitmodeNr; + } + +}; + +std::ostream& operator<<( std::ostream &str, const struct StationID &s ) { + str << "station " << s.stationName << " antennaset " << s.antennaSet << " clock " << s.clock/1000000 << " bitmode " << s.bitmode; + + return str; +} + +struct StationSettings { +private: + static const unsigned currentVersion = 1; + + unsigned version; + + bool valid() const { return version == currentVersion; } + +public: + struct StationID station; + + unsigned nrBeamlets; + + size_t nrSamples; + + unsigned nrBoards; + size_t nrFlagRanges; + + key_t dataKey; + + StationSettings(); + + // read settings from shared memory, using the given stationID + StationSettings(struct StationID station); + + bool operator==(const struct StationSettings &other) const { + return station == other.station + && nrBeamlets == other.nrBeamlets + && nrSamples == other.nrSamples + && nrBoards == other.nrBoards + && nrFlagRanges == other.nrFlagRanges + && dataKey == other.dataKey; + } + +}; + +std::ostream& operator<<( std::ostream &str, const struct StationSettings &s ) { + str << s.station << " beamlets: " << s.nrBeamlets << " buffer: " << (1.0 * s.nrSamples / s.station.clock * 1024) << "s"; + + return str; +} + +StationSettings::StationSettings() +: + version(currentVersion) +{ +} + + +StationSettings::StationSettings(struct StationID station) +: + version(currentVersion), + station(station) +{ + SharedStruct<struct StationSettings> shm(station.hash(), false); + + *this = shm.get(); + + ASSERT( valid() ); +} + + +template<typename T> class SampleBuffer { +public: + SampleBuffer( const struct StationSettings &settings, bool create ); + +private: + const std::string logPrefix; + SharedMemoryArena data; + SparseSetAllocator allocator; + + struct StationSettings *initSettings( const struct StationSettings &localSettings, bool create ); + + static size_t dataSize( const struct StationSettings &settings ) { + return sizeof settings + + NR_RSPBOARDS * (Ranges::size(settings.nrFlagRanges) + 8) + + settings.nrBeamlets * (settings.nrSamples * N_POL * 2 * settings.station.bitmode / 8 + 128); + } + +public: + struct StationSettings *settings; + + const size_t nrBeamlets; + const size_t nrSamples; + const size_t nrFlagRanges; + + MultiDimArray<T,2> beamlets; // [subband][sample] + std::vector<Ranges> flags; // [rspboard] +}; + + +template<typename T> SampleBuffer<T>::SampleBuffer( const struct StationSettings &_settings, bool create ) +: + logPrefix(str(boost::format("[station %s %s board] [SampleBuffer] ") % _settings.station.stationName % _settings.station.antennaSet)), + data(_settings.dataKey, dataSize(_settings), create ? 
SharedMemoryArena::CREATE_EXCL : SharedMemoryArena::READ), + allocator(data), + settings(initSettings(_settings, create)), + + nrBeamlets(settings->nrBeamlets), + nrSamples(settings->nrSamples), + nrFlagRanges(settings->nrFlagRanges), + + beamlets(boost::extents[nrBeamlets][nrSamples], 128, allocator, false, create), + flags(settings->nrBoards) +{ + // bitmode must coincide with our template + ASSERT( sizeof(T) == N_POL * 2 * settings->station.bitmode / 8 ); + + // typical #slots/packet + ASSERT( settings->nrSamples % 16 == 0 ); + + for (size_t f = 0; f < flags.size(); f++) { + size_t numBytes = Ranges::size(nrFlagRanges); + + flags[f] = Ranges(static_cast<int64*>(allocator.allocate(numBytes, 8)), numBytes, nrSamples, create); + } + + LOG_INFO_STR( logPrefix << "Initialised" ); +} + +template<typename T> struct StationSettings *SampleBuffer<T>::initSettings( const struct StationSettings &localSettings, bool create ) +{ + //struct StationSettings *sharedSettings = allocator.allocateTyped<struct StationSettings>(); + struct StationSettings *sharedSettings = allocator.allocateTyped(); + + if (create) { + // register settings + LOG_INFO_STR( logPrefix << "Registering " << localSettings.station ); + *sharedSettings = localSettings; + } else { + // verify settings + ASSERT( *sharedSettings == localSettings ); + LOG_INFO_STR( logPrefix << "Connected to " << localSettings.station ); + } + + return sharedSettings; +} + +template<typename T> class RSPBoard { +public: + RSPBoard( Stream &inputStream, SampleBuffer<T> &buffer, unsigned boardNr, const struct StationSettings &settings ); + + const unsigned nr; + + bool readPacket(); + void writePacket(); + + static size_t packetSize( struct RSP &packet ) { + return sizeof(struct RSP::Header) + packet.header.nrBeamlets * packet.header.nrBlocks * sizeof(T); + } + +private: + const std::string logPrefix; + + Stream &inputStream; + const bool supportPartialReads; + struct RSP packet; + TimeStamp last_timestamp; + TimeStamp last_logtimestamp; + + SampleBuffer<T> &buffer; + Ranges &flags; + const struct StationSettings settings; + const size_t firstBeamlet; + + size_t nrReceived, nrBadSize, nrBadTime, nrOutOfOrder; + + void logStatistics(); +}; + +template<typename T> RSPBoard<T>::RSPBoard( Stream &inputStream, SampleBuffer<T> &buffer, unsigned boardNr, const struct StationSettings &settings ) +: + nr(boardNr), + logPrefix(str(boost::format("[station %s %s board %u] [RSPBoard] ") % settings.station.stationName % settings.station.antennaSet % nr)), + inputStream(inputStream), + supportPartialReads(dynamic_cast<SocketStream *>(&inputStream) == 0 || dynamic_cast<SocketStream &>(inputStream).protocol != SocketStream::UDP), + + buffer(buffer), + flags(buffer.flags[boardNr]), + settings(settings), + firstBeamlet(settings.nrBeamlets / settings.nrBoards * boardNr), + + nrReceived(0), + nrBadSize(0), + nrBadTime(0), + nrOutOfOrder(0) +{ +} + +template<typename T> void RSPBoard<T>::writePacket() +{ + const uint8 &nrBeamlets = packet.header.nrBeamlets; + const uint8 &nrTimeslots = packet.header.nrBlocks; + + ASSERT( settings.nrSamples % nrTimeslots == 0 ); + + // the timestamp is of the last read packet by definition + const TimeStamp ×tamp = last_timestamp; + + const size_t bufferOffset = (int64)timestamp % settings.nrSamples; + + const T *beamlets = reinterpret_cast<const T*>(&packet.data); + + ASSERT( nrBeamlets <= settings.nrBeamlets / settings.nrBoards ); + + // mark data we overwrite as invalid + flags.excludeBefore(timestamp + nrTimeslots - settings.nrSamples); + 
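As an aside on the SharedMemoryArena/SharedStruct classes this test relies on (defined in SharedMemory.h above): a minimal usage sketch with a hypothetical key and payload struct. The creator owns the System V segment and removes it again when its SharedStruct goes out of scope; readers retry attaching until the segment exists or the timeout expires.

    #include "SharedMemory.h"

    using namespace LOFAR::RTCP;

    struct Counters {                 // example payload, not part of LOFAR
      unsigned packetsReceived;
      unsigned packetsDropped;
    };

    void producer()
    {
      SharedStruct<Counters> shm(0x10001, true);   // creates the segment

      shm.get().packetsReceived = 0;
      shm.get().packetsDropped  = 0;

      // keep 'shm' alive for as long as consumers need the data;
      // the segment is removed when it goes out of scope
    }

    void consumer()
    {
      SharedStruct<Counters> shm(0x10001, false);  // attaches, retrying for up to 60s

      unsigned received = shm.get().packetsReceived;
      (void)received;
    }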
+ // transpose + for (uint8 b = 0; b < nrBeamlets; ++b) { + T *dst = &buffer.beamlets[firstBeamlet + b][bufferOffset]; + + memcpy(dst, beamlets, nrTimeslots * sizeof(T)); + + beamlets += nrTimeslots; + } + + // mark as valid + flags.include(timestamp, timestamp + nrTimeslots); +} + +template<typename T> bool RSPBoard<T>::readPacket() +{ + if (supportPartialReads) { + // read header first + inputStream.read(&packet, sizeof(struct RSP::Header)); + + // read rest of packet + inputStream.read(&packet.data, packetSize(packet) - sizeof(struct RSP::Header)); + + ++nrReceived; + } else { + // read full packet at once -- numbytes will tell us how much we've actually read + size_t numbytes = inputStream.tryRead(&packet, sizeof packet); + + ++nrReceived; + + if( numbytes < sizeof(struct RSP::Header) + || numbytes != packetSize(packet) ) { + LOG_WARN_STR( logPrefix << "Packet is " << numbytes << " bytes, but should be " << packetSize(packet) << " bytes" ); + + ++nrBadSize; + return false; + } + } + + // check sanity of packet + + // detect bad timestamp + if (packet.header.timestamp == ~0U) { + ++nrBadTime; + return false; + } + + const TimeStamp timestamp(packet.header.timestamp, packet.header.blockSequenceNumber, settings.station.clock); + + // detect out-of-order data + if (timestamp < last_timestamp) { + ++nrOutOfOrder; + return false; + } + + // don't accept big jumps (>10s) in timestamp + const int64 oneSecond = settings.station.clock / 1024; + + if (last_timestamp && packet.header.timestamp > last_timestamp + 10 * oneSecond) { + ++nrBadTime; + return false; + } + + // packet was read and is sane + + last_timestamp = timestamp; + + if (timestamp > last_logtimestamp + oneSecond) { + logStatistics(); + + last_logtimestamp = timestamp; + } + + return true; +} + + +template<typename T> void RSPBoard<T>::logStatistics() +{ + LOG_INFO_STR( logPrefix << "Received " << nrReceived << " packets: " << nrOutOfOrder << " out of order, " << nrBadTime << " bad timestamps, " << nrBadSize << " bad sizes" ); + + nrReceived = 0; + nrOutOfOrder = 0; + nrBadTime = 0; + nrBadSize = 0; +} + + + + + + +class StationStreams { +public: + StationStreams( const std::string &logPrefix, const StationSettings &settings, const std::vector<std::string> &streamDescriptors ); + + void process(); + + void stop(); + +protected: + const std::string logPrefix; + const StationSettings settings; + const std::vector<std::string> streamDescriptors; + const size_t nrBoards; + + WallClockTime waiter; + + virtual void processBoard( size_t nr ) = 0; +}; + +StationStreams::StationStreams( const std::string &logPrefix, const StationSettings &settings, const std::vector<std::string> &streamDescriptors ) +: + logPrefix(logPrefix), + settings(settings), + streamDescriptors(streamDescriptors), + nrBoards(streamDescriptors.size()) +{ +} + +void StationStreams::process() +{ + std::vector<OMPThread> threads(nrBoards); + + ASSERT(nrBoards > 0); + + LOG_INFO_STR( logPrefix << "Start" ); + + #pragma omp parallel sections num_threads(2) + { + #pragma omp section + { + // start all boards + LOG_INFO_STR( logPrefix << "Starting all boards" ); + #pragma omp parallel for num_threads(nrBoards) + for (size_t i = 0; i < nrBoards; ++i) { + OMPThread::ScopedRun sr(threads[i]); + + processBoard(i); + } + } + + #pragma omp section + { + // wait until we have to stop + LOG_INFO_STR( logPrefix << "Waiting for stop signal" ); + waiter.waitForever(); + + // kill all boards + LOG_INFO_STR( logPrefix << "Stopping all boards" ); + #pragma omp parallel for num_threads(nrBo 
\ No newline at end of file diff --git a/RTCP/IONProc/test/newInputSection/newInputSection.cc b/RTCP/IONProc/test/newInputSection/newInputSection.cc new file mode 100644 index 0000000000000000000000000000000000000000..112cb940c3b13309d18802b08e215bf59139842d --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/newInputSection.cc @@ -0,0 +1,922 @@ +#include <lofar_config.h> +#include <Common/LofarLogger.h> +#include <Common/Thread/Mutex.h> +#include <Stream/Stream.h> +#include <Stream/SocketStream.h> +#include <Interface/MultiDimArray.h> +#include <Interface/Stream.h> +#include <WallClockTime.h> +#include "SharedMemory.h" +#include "Ranges.h" +#include "OMPThread.h" +#include "StationID.h" +#include "StationSettings.h" +#include "SampleBuffer.h" +#include "StationData.h" +#include "mpi.h" + +#include <vector> +#include <omp.h> +#include <string> +#include <boost/format.hpp> + +#define DURATION 60 +#define BLOCKSIZE 0.005 +#define NRSTATIONS 3 + +using namespace LOFAR; +using namespace RTCP; + +template<typename T> class SampleBufferReader { +public: + SampleBufferReader( const StationSettings &settings, const std::vector<size_t> beamlets, const TimeStamp &from, const TimeStamp &to, size_t blockSize ); + + void process( double maxDelay ); + +protected: + const StationSettings settings; + SampleBuffer<T> buffer; + + const std::vector<size_t> beamlets; + const TimeStamp from, to; + const size_t blockSize; + + virtual void copyNothing( const TimeStamp &from, const TimeStamp &to ) { (void)from, (void)to; } + + virtual void copyBeamlet( unsigned beamlet, unsigned transfer, const TimeStamp &from_ts, const T* from, size_t nrSamples ) = 0; + virtual void copyStart( const TimeStamp &from, const TimeStamp &to, size_t wrap ) { (void)from, (void)to, (void)wrap; } + + virtual void copyFlags ( unsigned transfer, const SparseSet<int64> &flags ) = 0; + virtual void copyEnd() {} + + void copy( const TimeStamp &from, const TimeStamp &to ); + +private: + WallClockTime waiter; +}; + + +template<typename T> SampleBufferReader<T>::SampleBufferReader( const StationSettings &settings, const std::vector<size_t> beamlets, const TimeStamp &from, const TimeStamp &to, size_t blockSize ) +: + settings(settings), + buffer(settings, false), + + beamlets(beamlets), + from(from), + to(to), + blockSize(blockSize) +{ + for (size_t i = 0; i < beamlets.size(); ++i) + ASSERT( beamlets[i] < buffer.nrBeamlets ); + + ASSERT( blockSize > 0 ); + ASSERT( blockSize < settings.nrSamples ); + ASSERT( from < to ); +} + + +template<typename T> void SampleBufferReader<T>::process( double maxDelay ) +{ + /*const TimeStamp maxDelay_ts(static_cast<int64>(maxDelay * settings.station.clock / 1024) + blockSize, settings.station.clock); + + const TimeStamp current(from); + + for (TimeStamp current = from; current < to; current += blockSize) { + // wait + LOG_INFO_STR("Waiting until " << (current + maxDelay_ts) << " for " << current); + waiter.waitUntil( current + maxDelay_ts ); + + // read + LOG_INFO_STR("Reading from " << current << " to " << (current + blockSize)); + copy(current, current + blockSize); + } + + LOG_INFO("Done reading data");*/ + const TimeStamp maxDelay_ts(static_cast<int64>(maxDelay * settings.station.clock / 1024) + blockSize, settings.station.clock); + + const TimeStamp current(from); + + double totalwait = 0.0; + unsigned totalnr = 0; + + double lastreport = MPI_Wtime(); + + for (TimeStamp current = from; current < to; current += blockSize) { + // wait + waiter.waitUntil( current + maxDelay_ts ); + + // read + double bs = 
MPI_Wtime(); + + copy(current, current + blockSize); + + totalwait += MPI_Wtime() - bs; + totalnr++; + + if (bs - lastreport > 1.0) { + double mbps = (sizeof(T) * blockSize * beamlets.size() * 8) / (totalwait/totalnr) / 1e6; + lastreport = bs; + totalwait = 0.0; + totalnr = 0; + + LOG_INFO_STR("Reading speed: " << mbps << " Mbit/s"); + } + } + + LOG_INFO("Done reading data"); +} + +template<typename T> void SampleBufferReader<T>::copy( const TimeStamp &from, const TimeStamp &to ) +{ + ASSERT( from < to ); + ASSERT( to - from < (int64)buffer.nrSamples ); + + const unsigned nrBoards = buffer.flags.size(); + +#if 0 + // check whether there is any data at all + bool data = false; + + for (unsigned b = 0; b < nrBoards; ++b) + if (buffer.flags[b].anythingBetween(from, to)) { + data = true; + break; + } + + if (!data) { + copyNothing(from, to); + return; + } +#endif + + // copy the beamlets + + size_t from_offset = (int64)from % buffer.nrSamples; + size_t to_offset = (int64)to % buffer.nrSamples; + + if (to_offset == 0) + to_offset = buffer.nrSamples; + + // wrap > 0 if we need to wrap around the end of the buffer + size_t wrap = from_offset < to_offset ? 0 : buffer.nrSamples - from_offset; + + copyStart(from, to, wrap); + + for (size_t i = 0; i < beamlets.size(); ++i) { + unsigned nr = beamlets[i]; + const T* origin = &buffer.beamlets[nr][0]; + + if (wrap > 0) { + copyBeamlet( nr, 0, from, origin + from_offset, wrap ); + copyBeamlet( nr, 1, from, origin, to_offset ); + } else { + copyBeamlet( nr, 0, from, origin + from_offset, to_offset - from_offset ); + } + } + + // copy the flags + + for (unsigned b = 0; b < nrBoards; ++b) + copyFlags( b, buffer.flags[b].sparseSet(from, to).invert(from, to) ); + + copyEnd(); +} + +Mutex MPIMutex; + +//#define USE_RMA + +#ifdef USE_RMA + +#define MULTIPLE_WINDOWS + +template<typename T> class MPISharedBuffer: public SampleBuffer<T> { +public: + MPISharedBuffer( const struct StationSettings &settings ); + + ~MPISharedBuffer(); + +private: +#ifdef MULTIPLE_WINDOWS + std::vector<MPI_Win> beamlets_windows; +#else + MPI_Win beamlets_window; +#endif +}; + +template<typename T> MPISharedBuffer<T>::MPISharedBuffer( const struct StationSettings &settings ) +: + SampleBuffer<T>(settings, false) +#ifdef MULTIPLE_WINDOWS + , beamlets_windows(NRSTATIONS) +#endif +{ +#ifdef MULTIPLE_WINDOWS + for (int i = 0; i < NRSTATIONS; ++i) { + int error = MPI_Win_create(this->beamlets.origin(), this->beamlets.num_elements() * sizeof(T), 1, MPI_INFO_NULL, MPI_COMM_WORLD, &beamlets_windows[i]); + + ASSERT(error == MPI_SUCCESS); + } +#else + int error = MPI_Win_create(this->beamlets.origin(), this->beamlets.num_elements() * sizeof(T), 1, MPI_INFO_NULL, MPI_COMM_WORLD, &beamlets_window); + + ASSERT(error == MPI_SUCCESS); +#endif +} + +template<typename T> MPISharedBuffer<T>::~MPISharedBuffer() +{ +#ifdef MULTIPLE_WINDOWS + for (int i = 0; i < NRSTATIONS; ++i) { + int error = MPI_Win_free(&beamlets_windows[i]); + + ASSERT(error == MPI_SUCCESS); + } +#else + int error = MPI_Win_free(&beamlets_window); + + ASSERT(error == MPI_SUCCESS); +#endif +} + +template<typename T> class MPISharedBufferReader { +public: + MPISharedBufferReader( const std::vector<struct StationSettings> &settings, const TimeStamp &from, const TimeStamp &to, size_t blockSize, const std::vector<size_t> &beamlets ); + + ~MPISharedBufferReader(); + + void process( double maxDelay ); + +private: + const std::vector<struct StationSettings> settings; + const TimeStamp from, to; + const size_t blockSize; + const 
std::vector<size_t> beamlets; + + MultiDimArray<T, 3> buffer; // [station][beamlet][sample] + +#ifdef MULTIPLE_WINDOWS + std::vector<MPI_Win> beamlets_windows; +#else + MPI_Win beamlets_window; +#endif + + WallClockTime waiter; + + void copy( const TimeStamp &from, const TimeStamp &to ); +}; + +template<typename T> MPISharedBufferReader<T>::MPISharedBufferReader( const std::vector<struct StationSettings> &settings, const TimeStamp &from, const TimeStamp &to, size_t blockSize, const std::vector<size_t> &beamlets ) +: + settings(settings), + from(from), + to(to), + blockSize(blockSize), + beamlets(beamlets), + + buffer(boost::extents[settings.size()][beamlets.size()][blockSize], 128, heapAllocator, false, false) +#ifdef MULTIPLE_WINDOWS + , beamlets_windows(settings.size()) +#endif +{ + ASSERT( settings.size() > 0 ); + ASSERT( from.getClock() == to.getClock() ); + ASSERT( settings[0].station.clock == from.getClock()); + + for (size_t i = 0; i < settings.size(); ++i) { + ASSERT(settings[i].station.clock == settings[0].station.clock); + ASSERT(settings[i].station.clock == from.getClock()); + ASSERT(settings[i].station.bitmode == settings[0].station.bitmode); + + ASSERT(settings[i].nrSamples > blockSize); + } + +#ifdef MULTIPLE_WINDOWS + for (int i = 0; i < settings.size(); ++i) { + int error = MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &beamlets_windows[i]); + + ASSERT(error == MPI_SUCCESS); + } +#else + int error = MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &beamlets_window); + + ASSERT(error == MPI_SUCCESS); +#endif +} + +template<typename T> MPISharedBufferReader<T>::~MPISharedBufferReader() +{ +#ifdef MULTIPLE_WINDOWS + for (int i = 0; i < settings.size(); ++i) { + int error = MPI_Win_free(&beamlets_windows[i]); + + ASSERT(error == MPI_SUCCESS); + } +#else + int error = MPI_Win_free(&beamlets_window); + + ASSERT(error == MPI_SUCCESS); +#endif +} + +template<typename T> void MPISharedBufferReader<T>::process( double maxDelay ) +{ + const TimeStamp maxDelay_ts(static_cast<int64>(maxDelay * settings[0].station.clock / 1024) + blockSize, settings[0].station.clock); + + const TimeStamp current(from); + + double totalwait = 0.0; + unsigned totalnr = 0; + + double lastreport = MPI_Wtime(); + + for (TimeStamp current = from; current < to; current += blockSize) { + // wait + //LOG_INFO_STR("Waiting until " << (current + maxDelay_ts) << " for " << current); + waiter.waitUntil( current + maxDelay_ts ); + + // read + //LOG_INFO_STR("Reading from " << current << " to " << (current + blockSize)); + double bs = MPI_Wtime(); + + copy(current, current + blockSize); + + totalwait += MPI_Wtime() - bs; + totalnr++; + + if (bs - lastreport > 1.0) { + double mbps = (sizeof(T) * blockSize * beamlets.size() * 8) / (totalwait/totalnr) / 1e6; + lastreport = bs; + totalwait = 0.0; + totalnr = 0; + + LOG_INFO_STR("Reading speed: " << mbps << " Mbit/s"); + } + } + + LOG_INFO("Done reading data"); +} + +template<typename T> void MPISharedBufferReader<T>::copy( const TimeStamp &from, const TimeStamp &to ) +{ + int error; + +#ifdef MULTIPLE_WINDOWS + for (int i = 0; i < settings.size(); ++i) { + error = MPI_Win_lock( MPI_LOCK_SHARED, i, MPI_MODE_NOCHECK, beamlets_windows[i] ); + ASSERT(error == MPI_SUCCESS); + } +#endif + + for (size_t s = 0; s < settings.size(); ++s) { +#ifndef MULTIPLE_WINDOWS + error = MPI_Win_lock( MPI_LOCK_SHARED, s, MPI_MODE_NOCHECK, beamlets_window ); + ASSERT(error == MPI_SUCCESS); +#endif + + //LOG_INFO_STR("Copying from station " << s); + const struct 
StationSettings settings = this->settings[s]; + + size_t from_offset = (int64)from % settings.nrSamples; + size_t to_offset = (int64)to % settings.nrSamples; + + if (to_offset == 0) + to_offset = settings.nrSamples; + + size_t wrap = from_offset < to_offset ? 0 : settings.nrSamples - from_offset; + + for (size_t i = 0; i < beamlets.size(); ++i) { + unsigned nr = beamlets[i]; + + size_t origin = nr * settings.nrSamples; + + if (wrap > 0) { + //if (i==0) LOG_INFO_STR("Reading wrapped data"); +#ifdef MULTIPLE_WINDOWS + error = MPI_Get( &buffer[s][i][0], wrap * sizeof(T), MPI_CHAR, s, (origin + from_offset) * sizeof(T), wrap * sizeof(T), MPI_CHAR, beamlets_windows[s] ); +#else + error = MPI_Get( &buffer[s][i][0], wrap * sizeof(T), MPI_CHAR, s, (origin + from_offset) * sizeof(T), wrap * sizeof(T), MPI_CHAR, beamlets_window ); +#endif + + ASSERT(error == MPI_SUCCESS); + +#ifdef MULTIPLE_WINDOWS + error = MPI_Get( &buffer[s][i][wrap], to_offset * sizeof(T), MPI_CHAR, s, origin * sizeof(T), to_offset * sizeof(T), MPI_CHAR, beamlets_windows[s] ); +#else + error = MPI_Get( &buffer[s][i][wrap], to_offset * sizeof(T), MPI_CHAR, s, origin * sizeof(T), to_offset * sizeof(T), MPI_CHAR, beamlets_window ); +#endif + + ASSERT(error == MPI_SUCCESS); + } else { + // higher performance by splitting into multiple requests if block size is large -- formula yet unknown + //size_t partSize = (to_offset - from_offset) / 2 + 1; + size_t partSize = to_offset - from_offset; + + for (size_t x = from_offset; x < to_offset; x += partSize) { + size_t y = std::min(x + partSize, to_offset); + +#ifdef MULTIPLE_WINDOWS + error = MPI_Get( &buffer[s][i][x - from_offset], (y - x) * sizeof(T), MPI_CHAR, s, (origin + x) * sizeof(T), (y - x) * sizeof(T), MPI_CHAR, beamlets_windows[s] ); +#else + error = MPI_Get( &buffer[s][i][x - from_offset], (y - x) * sizeof(T), MPI_CHAR, s, (origin + x) * sizeof(T), (y - x) * sizeof(T), MPI_CHAR, beamlets_window ); +#endif + + ASSERT(error == MPI_SUCCESS); + } + } + } + +#ifndef MULTIPLE_WINDOWS + error = MPI_Win_unlock( s, beamlets_window ); + ASSERT(error == MPI_SUCCESS); +#endif + } + +#ifdef MULTIPLE_WINDOWS + for (int i = 0; i < settings.size(); ++i) { + error = MPI_Win_unlock( i, beamlets_windows[i] ); + ASSERT(error == MPI_SUCCESS); + } +#endif +} +#else + +template<typename T> class MPISendStation: public SampleBufferReader<T> { +public: + MPISendStation( const struct StationSettings &settings, const TimeStamp &from, const TimeStamp &to, size_t blockSize, const std::vector<size_t> &beamlets, unsigned destRank ); + + struct Header { + StationID station; + + bool data; + size_t wrap; + + int64 from, to; + + size_t nrBeamlets; + size_t nrFlags; + + size_t flagsSize; + }; + + union tag_t { + struct { + unsigned type:2; + unsigned beamlet:10; + unsigned transfer:3; + } bits; + + int value; + + tag_t(): value(0) {} + }; + + enum tag_types { CONTROL = 0, BEAMLET = 1, FLAGS = 2 }; + +protected: + const unsigned destRank; + + std::vector<MPI_Request> requests; + size_t nrRequests; + + Matrix<char> flagsData; + + virtual void copyNothing( const TimeStamp &from, const TimeStamp &to ); + virtual void copyStart( const TimeStamp &from, const TimeStamp &to, size_t wrap ); + virtual void copyBeamlet( unsigned beamlet, unsigned transfer, const TimeStamp &from_ts, const T* from, size_t nrSamples ); + virtual void copyFlags ( unsigned transfer, const SparseSet<int64> &flags ); + virtual void copyEnd(); + + size_t flagsSize() const { + return sizeof(uint32_t) + this->settings.nrFlagRanges * sizeof(int64) * 
2; + } +}; + + +template<typename T> MPISendStation<T>::MPISendStation( const struct StationSettings &settings, const TimeStamp &from, const TimeStamp &to, size_t blockSize, const std::vector<size_t> &beamlets, unsigned destRank ) +: + SampleBufferReader<T>(settings, beamlets, from, to, blockSize), + destRank(destRank), + requests(this->buffer.flags.size() + beamlets.size() * 2, 0), + nrRequests(0), + flagsData(this->buffer.flags.size(), flagsSize()) +{ +} + + +template<typename T> void MPISendStation<T>::copyNothing( const TimeStamp &from, const TimeStamp &to ) +{ + LOG_INFO_STR( "No valid data!" ); + + Header header; + header.station = this->settings.station; + header.data = false; + header.wrap = 0; + header.from = from; + header.to = to; + header.nrBeamlets = 0; + header.nrFlags = 0; + + { + ScopedLock sl(MPIMutex); + + union tag_t tag; + + tag.bits.type = CONTROL; + + int error = MPI_Isend(&header, sizeof header, MPI_CHAR, destRank, tag.value, MPI_COMM_WORLD, &requests[nrRequests++]); + ASSERT(error == MPI_SUCCESS); + } +} + + +template<typename T> void MPISendStation<T>::copyStart( const TimeStamp &from, const TimeStamp &to, size_t wrap ) +{ + Header header; + header.station = this->settings.station; + header.data = true; + header.wrap = wrap; + header.from = from; + header.to = to; + header.nrBeamlets = this->beamlets.size(); + header.nrFlags = this->buffer.flags.size(); + header.flagsSize = this->flagsSize(); + + { + ScopedLock sl(MPIMutex); + + int error = MPI_Isend(&header, sizeof header, MPI_CHAR, destRank, 0, MPI_COMM_WORLD, &requests[nrRequests++]); + + ASSERT(error == MPI_SUCCESS); + } + + //LOG_INFO( "Header sent" ); +} + + +template<typename T> void MPISendStation<T>::copyBeamlet( unsigned beamlet, unsigned transfer, const TimeStamp &from_ts, const T* from, size_t nrSamples) +{ + (void)from_ts; + + ScopedLock sl(MPIMutex); + + union tag_t tag; + + tag.bits.type = BEAMLET; + tag.bits.beamlet = beamlet; + tag.bits.transfer = transfer; + + int error = MPI_Isend( + (void*)from, nrSamples * sizeof(T), MPI_CHAR, + destRank, tag.value, + MPI_COMM_WORLD, &requests[nrRequests++]); + + ASSERT(error == MPI_SUCCESS); +} + + +template<typename T> void MPISendStation<T>::copyFlags( unsigned transfer, const SparseSet<int64> &flags ) +{ + //LOG_INFO_STR( "Copy flags for beamlets [" << fromBeamlet << ", " << toBeamlet << "): " << (100.0 * flags.count() / this->blockSize) << "% " << flags ); + ssize_t numBytes = flags.marshall(&flagsData[transfer][0], flagsSize()); + + ASSERT(numBytes >= 0); + + union tag_t tag; + + tag.bits.type = FLAGS; + tag.bits.transfer = transfer; + + { + ScopedLock sl(MPIMutex); + + int error = MPI_Isend( + (void*)&flagsData[transfer][0], flagsSize(), MPI_CHAR, + destRank, tag.value, + MPI_COMM_WORLD, &requests[nrRequests++]); + + ASSERT(error == MPI_SUCCESS); + } +} + + +template<typename T> void MPISendStation<T>::copyEnd() +{ + int flag = false; + std::vector<MPI_Status> statusses(nrRequests); + + while (!flag) { + { + ScopedLock sl(MPIMutex); + + int error = MPI_Testall(nrRequests, &requests[0], &flag, &statusses[0]); + + ASSERT(error == MPI_SUCCESS); + } + + // can't hold lock indefinitely + pthread_yield(); + } + + //LOG_INFO( "Copy done"); + + nrRequests = 0; +} + + +template<typename T> class MPIReceiveStation { +public: + MPIReceiveStation( const struct StationSettings &settings, const std::vector<int> stationRanks, const std::vector<size_t> &beamlets, size_t blockSize ); + + void receiveBlock(); + +private: + const struct StationSettings settings; + const 
std::vector<int> stationRanks; + +public: + const std::vector<size_t> beamlets; + const size_t blockSize; + MultiDimArray<T, 3> samples; // [station][beamlet][sample] + Matrix< SparseSet<int64> > flags; // [station][board] +}; + +template<typename T> MPIReceiveStation<T>::MPIReceiveStation( const struct StationSettings &settings, const std::vector<int> stationRanks, const std::vector<size_t> &beamlets, size_t blockSize ) +: + settings(settings), + stationRanks(stationRanks), + beamlets(beamlets), + blockSize(blockSize), + samples(boost::extents[stationRanks.size()][beamlets.size()][blockSize], 128, heapAllocator, false, false), + flags(stationRanks.size(), settings.nrBoards) +{ +} + + +template<typename T> void MPIReceiveStation<T>::receiveBlock() +{ + struct MPISendStation<T>::Header header; + + int error; + + size_t nrRequests = 0; + std::vector<MPI_Request> header_requests(stationRanks.size()); + std::vector<struct MPISendStation<T>::Header> headers(stationRanks.size()); + + // post receives for all headers + + for (size_t nr = 0; nr < stationRanks.size(); ++nr) { + typename MPISendStation<T>::tag_t tag; + + // receive the header + + tag.bits.type = MPISendStation<T>::CONTROL; + + error = MPI_Irecv(&headers[nr], sizeof header, MPI_CHAR, stationRanks[nr], tag.value, MPI_COMM_WORLD, &header_requests[nr]); + ASSERT(error == MPI_SUCCESS); + } + + // process stations in the order in which we receive the headers + + std::vector<MPI_Request> requests(beamlets.size() * 2 * stationRanks.size()); + std::vector<MPI_Status> statusses(beamlets.size() * 2 * stationRanks.size()); + Matrix< std::vector<char> > flagData(stationRanks.size(), settings.nrBoards); // [station][board][data] + + for (size_t i = 0; i < stationRanks.size(); ++i) { + int nr; + + // wait for any header request to finish + error = MPI_Waitany(header_requests.size(), &header_requests[0], &nr, MPI_STATUS_IGNORE); + ASSERT(error == MPI_SUCCESS); + + typename MPISendStation<T>::tag_t tag; + + // check the header + + const struct MPISendStation<T>::Header header = headers[nr]; + + ASSERT(header.to - header.from == (int64)blockSize); + ASSERT(header.wrap < blockSize); + + if (!header.data) + continue; + + // post receives for the beamlets + + ASSERT(header.nrBeamlets == beamlets.size()); + + tag.value = 0; // reset + tag.bits.type = MPISendStation<T>::BEAMLET; + + int rank = stationRanks[nr]; + + for (size_t beamlet = 0; beamlet < header.nrBeamlets; ++beamlet) { + tag.bits.beamlet = beamlet; + tag.bits.transfer = 0; + + error = MPI_Irecv( + &samples[nr][beamlet][0], sizeof(T) * (header.wrap ? 
header.wrap : blockSize), MPI_CHAR, + rank, tag.value, + MPI_COMM_WORLD, &requests[nrRequests++]); + + ASSERT(error == MPI_SUCCESS); + + if (header.wrap > 0) { + tag.bits.transfer = 1; + + error = MPI_Irecv( + &samples[nr][beamlet][header.wrap], sizeof(T) * (blockSize - header.wrap), MPI_CHAR, + rank, tag.value, + MPI_COMM_WORLD, &requests[nrRequests++]); + + ASSERT(error == MPI_SUCCESS); + } + } + + // post receives for the flags + + ASSERT(header.nrFlags == settings.nrBoards); + + tag.value = 0; // reset + tag.bits.type = MPISendStation<T>::FLAGS; + + for (size_t board = 0; board < header.nrFlags; ++board) { + tag.bits.transfer = board; + + flagData[nr][board].resize(header.flagsSize); + + error = MPI_Irecv( + &flagData[nr][board][0], header.flagsSize, MPI_CHAR, + rank, tag.value, + MPI_COMM_WORLD, &requests[nrRequests++]); + + ASSERT(error == MPI_SUCCESS); + } + } + + // wait for all transfers to finish + + if (nrRequests > 0) { + error = MPI_Waitall(nrRequests, &requests[0], &statusses[0]); + ASSERT(error == MPI_SUCCESS); + } + + // convert raw flagData to flags array + + for (size_t nr = 0; nr < stationRanks.size(); ++nr) + for (size_t board = 0; board < settings.nrBoards; ++board) + flags[nr][board].unmarshall(&flagData[nr][board][0]); + +} +#endif + +void sighandler(int) +{ + /* no-op */ +} + +int main( int argc, char **argv ) +{ + size_t clock = 200*1000*1000; + + typedef SampleBuffer<int16>::SampleType SampleT; + const TimeStamp from(time(0L) + 1, 0, clock); + const TimeStamp to(time(0L) + 1 + DURATION, 0, clock); + const size_t blockSize = BLOCKSIZE * clock / 1024; + std::map<unsigned, std::vector<size_t> > beamlets; + + struct StationID stationID("RS106", "LBA", clock, 16); + struct StationSettings settings; + + settings.station = stationID; + settings.nrBeamlets = 244; + settings.nrBoards = 4; + + settings.nrSamples = (5 * stationID.clock / 1024);// & ~0xFL; + settings.nrFlagRanges = 64; + + settings.dataKey = stationID.hash(); + + INIT_LOGGER(argv[0]); + + if (MPI_Init(&argc, &argv) != MPI_SUCCESS) { + LOG_ERROR_STR("MPI_Init failed"); + return 1; + } + + int nrHosts, rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &nrHosts); + +#ifdef USE_RMA + int nrStations = NRSTATIONS; +#else + int nrStations = NRSTATIONS; +#endif + + for (unsigned i = 0; i < 244; ++i) + beamlets[nrStations + i % (nrHosts - nrStations)].push_back(i); + + if (rank > nrStations - 1) { + // receiver + LOG_INFO_STR("Receiver " << rank << " starts, handling " << beamlets[rank].size() << " subbands from " << nrStations << " stations." 
); + +#ifdef USE_RMA + std::vector<struct StationSettings> stations(nrStations, settings); + + { + MPISharedBufferReader<SampleT> receiver(stations, from, to, blockSize, beamlets[rank]); + + receiver.process(0.0); + } +#else + std::vector<int> stationRanks(nrStations); + + for (size_t i = 0; i < stationRanks.size(); i++) + stationRanks[i] = i; + + { + MPIReceiveStation<SampleT> receiver(settings, stationRanks, beamlets[rank], blockSize); + + for(size_t block = 0; block < (to-from)/blockSize + 1; ++block) { + receiver.receiveBlock(); + + //LOG_INFO_STR("Receiver " << rank << " received block " << block); + } + } +#endif + LOG_INFO_STR("Receiver " << rank << " done"); + + MPI_Finalize(); + return 0; + } + + omp_set_nested(true); + omp_set_num_threads(32); + + signal(SIGHUP, sighandler); + siginterrupt(SIGHUP, 1); + + std::vector<std::string> inputStreams(4); + inputStreams[0] = "udp:127.0.0.1:4346"; + inputStreams[1] = "udp:127.0.0.1:4347"; + inputStreams[2] = "udp:127.0.0.1:4348"; + inputStreams[3] = "udp:127.0.0.1:4349"; + + if(rank == 0) { + Station< SampleT > station( settings, inputStreams ); + Generator< SampleT > generator( settings, inputStreams ); + + #pragma omp parallel sections num_threads(4) + { + #pragma omp section + { station.process(); } + + #pragma omp section + { generator.process(); } + + #pragma omp section + { sleep(DURATION + 1); station.stop(); sleep(1); generator.stop(); } + + #pragma omp section + { + struct StationID lookup("RS106", "HBA0"); + struct StationSettings s(stationID); + + LOG_INFO_STR("Detected " << s); +#ifdef USE_RMA + MPISharedBuffer<SampleT> streamer(s); +#else + #pragma omp parallel for num_threads(nrHosts - nrStations) + for (int i = nrStations; i < nrHosts; ++i) { + LOG_INFO_STR("Connecting to receiver " << i ); + MPISendStation< SampleT > streamer(s, from, to, blockSize, beamlets[i], i ); + + LOG_INFO_STR("Sending to receiver " << i ); + streamer.process( 0.0 ); + } +#endif + } + } + } else { + struct StationID lookup("RS106", "HBA0"); + struct StationSettings s(stationID); + + LOG_INFO_STR("Detected " << s); +#ifdef USE_RMA + MPISharedBuffer<SampleT> streamer(s); +#else + #pragma omp parallel for num_threads(nrHosts - nrStations) + for (int i = nrStations; i < nrHosts; ++i) { + LOG_INFO_STR("Connecting to receiver " << i ); + MPISendStation< SampleT > streamer(s, from, to, blockSize, beamlets[i], i ); + + LOG_INFO_STR("Sending to receiver " << i ); + streamer.process( 0.0 ); + } +#endif + } + + MPI_Finalize(); +} diff --git a/RTCP/IONProc/test/newInputSection/newInputSection_old.cc b/RTCP/IONProc/test/newInputSection/newInputSection_old.cc new file mode 100644 index 0000000000000000000000000000000000000000..6aeb5221ab289cec77026cd775a193843d4df31c --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/newInputSection_old.cc @@ -0,0 +1,706 @@ +#include <lofar_config.h> + +#include <Common/Thread/Mutex.h> +#include <Common/LofarLogger.h> +#include <Stream/SocketStream.h> +#include "TimeSync.h" + +#include <RSP.h> +#include <Interface/RSPTimeStamp.h> + +#include <iostream> +#include <vector> + +using namespace std; +using namespace LOFAR; +using namespace RTCP; + +const unsigned nrInputs = 10; +const unsigned nrOutputs = 10; + +const float sampleClock = 8 * 1e6; +const float subbandWidth = sampleClock / 1024; +const size_t samplesPerBlock = subbandWidth * 0.05; + +const size_t packetSize = 9000; + +class Consumer; + +/* + * Lock-free buffer for RSP packets, including + * producer primitives. Supports a single producer. 
+ */ +class PacketBuffer { +public: + struct Item { + // the converted timestamp for this packet, + // or 0 if this packet is being written. + volatile int64 timestamp; + + // the timestamp of the previous packet, + // or 0 for the first packet. + volatile int64 prev_timestamp; + + // the payload + struct RSP packet; + + Item(): timestamp(0), prev_timestamp(0) {} + }; + + class CircularPtr { + public: + CircularPtr( PacketBuffer &buffer, struct Item *item ): item(item), buffer(buffer) {} + + struct Item *item; + + void operator++() { + if (++item == buffer.end) + item = buffer.begin; + } + + void operator--() { + // wrap around before stepping back, so begin stays reachable + if (item == buffer.begin) + item = buffer.end; + + --item; + } + + private: + PacketBuffer &buffer; + }; + +private: + vector<struct Item> packets; + struct Item * const begin; + struct Item * const end; + + vector<Consumer *> consumers; + Mutex consumersMutex; + + // points to where input will be written + CircularPtr head; + + // points to the packet at or right after head + CircularPtr next; + + // last recorded timestamp + int64 prev_timestamp; + +public: + PacketBuffer( size_t bufsize, unsigned sampleClock, unsigned timesPerSlot, bool realtime ); + + // timestamp of the last written packet (exclusive, + // so actually points to one timestamp beyond the youngest + // packet). + // + // (or 0 if no data has been written yet) + volatile int64 youngest; + + // timestamp of the oldest slot in the buffer (or 0 if + // the buffer has not been fully filled yet) + volatile int64 oldest; + struct Item * volatile oldest_item; + + // call before starting to write at head + void startWrite(); + + // pointer to the head packet + struct RSP &writePtr(); + + // call after completing a write at head + void stopWrite(); + + // call when data stream ends + void noMoreData(); + + // add/remove consumers + void registerConsumer( Consumer &consumer ); + void unregisterConsumer( Consumer &consumer ); + + // configuration parameters + const unsigned sampleClock; + const bool realtime; + const size_t timesPerSlot; +}; + +/* + * An abstract class describing a data processor. 
+ */ +class PacketSink { +public: + virtual void processPacket( Consumer *consumer, const int64 &timestamp, struct RSP &packet ) = 0; + virtual void missingData( Consumer *consumer, const int64 &from, const int64 to ) = 0; +}; + +class Consumer { +public: + Consumer( PacketBuffer &buffer, PacketSink &sink ); + + void wait( int64 to, struct timespec &timeout ); + void read( int64 from, int64 to ); + +private: + PacketBuffer &buffer; + PacketSink &sink; + const bool realtime; + + PacketBuffer::CircularPtr tail; + + // needed for synchronisation + TimeSync needFrom, haveUntil; + friend class PacketBuffer; +}; + +PacketBuffer::PacketBuffer( size_t bufsize, unsigned sampleClock, unsigned timesPerSlot, bool realtime ) +: + packets(bufsize), + begin(&packets[0]), + end(&packets[bufsize]), + head(*this, begin), + next(*this, begin), + prev_timestamp(0), + + youngest(0), + oldest(0), + oldest_item(0), + + sampleClock(sampleClock), + realtime(realtime), + timesPerSlot(timesPerSlot) +{ + // head and next are distinct (once data is read) + ASSERT( bufsize >= 2 ); +} + +void PacketBuffer::startWrite() +{ + if (!realtime) { + // make sure that our consumers do not need the head.item + // that we're about to overwrite + + const int64 headTime = head.item->timestamp; + + if (headTime != 0) { + ScopedLock sl(consumersMutex); + + for (vector<Consumer *>::const_iterator i = consumers.begin(); i != consumers.end(); ++i) + (*i)->needFrom.wait( headTime + timesPerSlot ); + } + } + + // keep next pointed at the oldest packet + ++ next; + + // if we overwrite data because we wrapped around, + // we have to update oldest. + if (next.item->timestamp != 0) { + oldest_item = next.item; + oldest = next.item->timestamp; + } + + // invalidate head + head.item->timestamp = 0; +} + +struct RSP &PacketBuffer::writePtr() +{ + return head.item->packet; +} + +void PacketBuffer::stopWrite() +{ + // complete our bookkeeping on head + const struct RSP &packet = head.item->packet; + const int64 timestamp = TimeStamp(packet.header.timestamp, packet.header.blockSequenceNumber, sampleClock); + + head.item->prev_timestamp = prev_timestamp; + prev_timestamp = timestamp; + + // make head valid again + head.item->timestamp = timestamp; + + ++ head; + + // the very first packet initialises oldest + if (!oldest) { + oldest_item = head.item; + oldest = timestamp; + } + + youngest = timestamp + timesPerSlot; + + { + // readers could be waiting for this data both in realtime and in non-realtime modes + + // The consumersMutex only blocks if consumers are added or removed. + ScopedLock sl(consumersMutex); + + // make sure that our consumers unlock once we've written data they need + for (vector<Consumer *>::const_iterator i = consumers.begin(); i != consumers.end(); ++i) { + // These TimeSync locks block only if the consumer is about to wait + // for data. This is still cheaper than letting the consumers actively + // poll `youngest'. 
+ (*i)->haveUntil.set( youngest ); + } + } +} + +void PacketBuffer::noMoreData() +{ + ScopedLock sl(consumersMutex); + + // make sure that our consumers unlock + for (vector<Consumer *>::const_iterator i = consumers.begin(); i != consumers.end(); ++i) + (*i)->haveUntil.noMoreData(); +} + +Consumer::Consumer( PacketBuffer &buffer, PacketSink &sink ) +: + buffer(buffer), + sink(sink), + realtime(buffer.realtime), + tail(buffer, 0) +{ +} + +void Consumer::wait( int64 to, struct timespec &timeout ) +{ + if (!realtime) { + // sync will occur in read() + return; + } + + if (!haveUntil.wait( to, timeout )) { + LOG_WARN_STR( "Data arrived too late for " << to ); + } else { + LOG_DEBUG_STR( "Data arrived on time for " << to ); + } +} + +void Consumer::read( int64 from, int64 to ) +{ + /* + * Read data from the circular buffer, lock free + * under the following conditions: + * - running in real-time mode + * - no logging anywhere + * + * This means that any information we retrieve + * from `buffer' can already be outdated at the next + * memory access. So we often store a local copy. Also, we + * let the buffer cache the following info, and update it + * atomically: + * + * youngest: timestamp of the latest packet that was written + * oldest: timestamp of the oldest packet still in the buffer + * oldest_item: pointer to the oldest item + * + * Each item has the following properties: + * + * timestamp: timestamp of the first sample, or 0 if this packet + * is either not used yet or being overwritten. + * prev_timestamp: + * timestamp of the previous received packet, needed + * to detect some forms of packet loss. + * item: pointer to the payload. + * + * Note that even dereferencing multiple properties of the same + * item might yield results from different packets, if the + * writer passed by in between reads. + * + * In the cases that multiple properties are needed, make sure + * that the code functions correctly if a second property comes + * from a later packet than the first property. + */ + ASSERT(from < to); + + /* + * Sync with writer if needed + */ + + if (!realtime) { + // reserve all data since from + needFrom.set( from ); + + // make sure all data (that exists) + // until to is available. + haveUntil.wait( to ); + } + + const int64 youngest = buffer.youngest; + + /* + * We will exit once we encounter data up to 'to' + * or later. So we have to make sure that exists, + * as we cannot count on more data to arrive. + */ + + if (youngest <= from) { + // no data available -- don't look for it + LOG_DEBUG_STR( "Data loss because no data is available" ); + sink.missingData( this, from, to ); + return; + } + + if (youngest < to) { + // partial data available -- only look for what might exist + LOG_DEBUG_STR( "Data loss because end is not available" ); + sink.missingData( this, youngest, to ); + + to = youngest; + } + + /* + * Make an initial guess where to start looking. + * We just have to make sure that we do not jump + * between [from, to) as we'd be forced to consider + * anything before `tail' as a loss. + */ + + if (!tail.item) { + // We'll need to scan -- start at the oldest data + tail.item = buffer.oldest_item; + } else { + // We'll continue from the last read() + // + // Rewind if needed. Note that prev_timestamp is only 0 + // if there was no previous packet. + while( tail.item->prev_timestamp >= from ) { + LOG_DEBUG_STR("Rewinding, am " << (tail.item->prev_timestamp - from) << " ahead"); + -- tail; + } + } + + /* + * Locate any data we can find and process it. 
+ * + * We drop out of the loop when we either: + * - have all our data (from == to) + * - notice that the rest of the data is lost + */ + + while( from < to ) { + const int64 tailTime = tail.item->timestamp; + + if (tailTime == 0) { + // invalid or no data + + // prevent infinite loops if we run in lock-step with the writer + if (buffer.oldest >= to) + break; + + ++ tail; + continue; + } + + if (tailTime < from) { + // data is not useful to us + ++ tail; + continue; + } + + if (tailTime >= to) { + // Two possibilities: + // 1. Genuine data loss beyond the end of the packet. + // 2. Writer gained on us and wrote a new packet here. + + if (tail.item->prev_timestamp < from) { + // 1: this packet belongs here, so the loss + // spans across the end of the packet. + break; + } + + // 2: We're somewhere we should not be, most likely + // because the writer overwrote these packets. + + ASSERT(realtime); + + if (buffer.oldest >= to) { + // there is no data for us anymore + break; + } + + // valid data after head + LOG_DEBUG_STR( "Sync with head" ); + tail.item = buffer.oldest_item; + continue; + } + + if (tailTime != from) { + // data loss + LOG_DEBUG_STR( "Data loss within packet" ); + sink.missingData( this, from, tailTime ); + } + + // a packet! + sink.processPacket( this, tailTime, tail.item->packet ); + + /* + * If the writer did *anything* with this packet, the + * timestamp will have been changed. + */ + + if (tailTime != tail.item->timestamp) { + // we got interrupted + ASSERT(realtime); + + // mark packet as missing + LOG_DEBUG_STR( "Data loss due to read/write conflict" ); + sink.missingData( this, tailTime, tailTime + buffer.timesPerSlot ); + } + + // look for the next packet + from = tailTime + buffer.timesPerSlot; + + // no need to reconsider this packet + ++ tail; + } + + if (from < to) { + LOG_DEBUG_STR( "Data loss at end of packet" ); + sink.missingData( this, from, to ); + } +} + +void PacketBuffer::registerConsumer( Consumer &consumer ) +{ + ScopedLock sl(consumersMutex); + + consumers.push_back(&consumer); +} + +void PacketBuffer::unregisterConsumer( Consumer &consumer ) +{ + ScopedLock sl(consumersMutex); + + for (vector<Consumer*>::iterator i = consumers.begin(); i != consumers.end(); ++i) { + if (*i == &consumer) { + consumers.erase(i); + break; + } + } +} + +#if 0 +/* + * Input from one RSP board + */ +class RSPBoardInput { +public: + RSPBoardInput( FileDescriptorBasedStream &inputStream, unsigned subbandsPerPacket, unsigned timesPerSlot, unsigned nrPolarizations, unsigned sampleSize ); + + void read(); + + PacketBuffer buffer; + +private: + FileDescriptorBasedStream &inputStream; + + const unsigned subbandsPerPacket; + const unsigned timesPerSlot; + const unsigned nrPolarizations; + const unsigned sampleSize; + + const size_t subbandSize; + const size_t packetSize; +}; + +RSPBoardInput::RSPBoardInput( FileDescriptorBasedStream &inputStream, unsigned subbandsPerPacket, unsigned timesPerSlot, unsigned nrPolarizations, unsigned sampleSize ) +: + buffer(100), + inputStream(inputStream), + packetReadOffset(0), + + subbandsPerPacket(subbandsPerPacket), + timesPerSlot(timesPerSlot), + nrPolarizations(nrPolarizations), + sampleSize(sampleSize), + + subbandSize(timesPerSlot * nrPolarizations * sampleSize), + packetSize(sizeof(struct RSP::Header) + subbandsPerPacket * subbandSize) +{ + ASSERT(packetSize <= sizeof(struct RSP)); +} + +void RSPBoardInput::read() +{ + /* + * Read packets until we block. 
*/ + for(;;) { + struct RSP &packet = buffer.head.item->packet; + + void *dstPtr = reinterpret_cast<char*>(packet) + packetReadOffset; + size_t bytesLeft = packetSize - packetReadOffset; + size_t numbytes; + + if (packetReadOffset == 0) { + // new packet + buffer.startWrite(); + } + + try { + numbytes = s.tryRead(dstPtr, bytesLeft); + } catch(...) { + buffer.noMoreData(); + + throw; + } + + if (numbytes == bytesLeft) { + // finished reading packet + buffer.stopWrite(); + + packetReadOffset = 0; + } else { + // packet partially read + packetReadOffset += numbytes; + } + } +} + +/* + * Generates output from data generated by multiple RSP boards. + */ +class OutputGenerator { +public: + OutputGenerator( const vector< RSPBoardInput * > &inputs, const int64 &startTime, size_t blocksize ); + + int64 next_block_start; + size_t blocksize; + + void write(); + +private: + const vector< RSPboardInput * > &inputs; + vector< PacketBuffer::Consumer * > consumers; +}; + +OutputGenerator::OutputGenerator( const vector< RSPBoardInput * > &inputs, const int64 &startTime, size_t blocksize ) +: + next_block_start(startTime), + blocksize(blocksize), + inputs(inputs), + consumers(inputs.size(),0) +{ + for( size_t i = 0; i < inputs.size(); i++ ) + consumers[i] = new PacketBuffer::Consumer(inputs[i]->buffer); +} + +void OutputGenerator::processPacket( const RSPBoardInput &input, const int64 &timestamp, struct RSP *packet ) +{ +/* + char *srcPtr = packet->data; + + for (size_t sb = 0; sb < subbandsPerPacket; sb++) { + // copy full subband + memcpy( dstPtr, srcPtr, subbandSize ); + srcPtr += subbandSize; + } +*/ +} + +void OutputGenerator::missingData( const RSPBoardInput &input, const int64 &from, const int64 to ) +{ +} + +void OutputGenerator::write() +{ + /* todo: wait for T = next_block_start + max_wait_time */ + + for( size_t i = 0; i < consumers.size(); i++ ) { + RSPBoardInput &input = *inputs[i]; + PacketBuffer &buffer = input.buffer; + PacketBuffer::Consumer &consumer = consumers[i]; + unsigned timesPerSlot = input.timesPerSlot; + } +} + + +/* + * Design: + * + * Input is read from + */ +#endif + +#include <time.h> +#include <Common/Thread/Thread.h> +#include <WallClockTime.h> + +time_t start; + +class LogPacketSink: public PacketSink +{ +public: + virtual void processPacket( Consumer *consumer, const int64 &timestamp, struct RSP &packet ) { + //LOG_INFO_STR( "Received packet " << timestamp ); + } + virtual void missingData( Consumer *consumer, const int64 &from, const int64 to ) { + LOG_INFO_STR( "Missed data from " << from << " to " << to ); + } +}; + +class ConsumerThread { +public: + void run(); + + Consumer *consumer; + Thread thread; + + ConsumerThread( Consumer *consumer ): consumer(consumer), thread( this, &ConsumerThread::run ) {} +}; + +void ConsumerThread::run() +{ + TimeStamp cts(start, 0, sampleClock); + + for (size_t i = 0; i < 16; ++i) { + TimeStamp from = cts; + TimeStamp to = cts + 16 * 1000; + LOG_INFO_STR( ">>>> Reading from " << (int64)from << " to " << (int64)to << " <<<<<" ); + + struct timespec ts = to; + + consumer->wait( to, ts ); + consumer->read( from, to ); + LOG_INFO_STR( "<<<< Done reading from " << (int64)from << " to " << (int64)to << " >>>>>" ); + cts = to; + } +} + +int main( int argc, char **argv ) { + INIT_LOGGER(argv[0]); + + bool realtime = true; + + PacketBuffer buffer( 1100, sampleClock, 16, realtime ); + LogPacketSink logsink; + Consumer consumer( buffer, logsink ); + buffer.registerConsumer(consumer); + + ConsumerThread cthread( &consumer ); + + start = time(0) + 1; + + 
WallClockTime wct; + + TimeStamp ts(start, 0, sampleClock); + + for (size_t i = 0; i < 100; ++i) { + wct.waitUntil(ts + 0000); + + for (size_t j = 0; j < 160; ++j) { + buffer.startWrite(); + + struct RSP &packet = buffer.writePtr(); + + packet.header.timestamp = ts.getSeqId(); + packet.header.blockSequenceNumber = ts.getBlockId(); + + buffer.stopWrite(); + + ts += 16; + } + } + + buffer.noMoreData(); +} diff --git a/RTCP/IONProc/test/newInputSection/shmtest.cc b/RTCP/IONProc/test/newInputSection/shmtest.cc new file mode 100644 index 0000000000000000000000000000000000000000..f7b0d5d804b9cc0e15de40de1c8aeffbe1877ca8 --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/shmtest.cc @@ -0,0 +1,12 @@ +#include "SharedMemory.h" +#include <unistd.h> + +using namespace LOFAR; + +int main() { + INIT_LOGGER("foo"); + + SharedMemory shm( 0x123, 4096, SharedMemory::READ ); + + sleep(5); +} diff --git a/RTCP/IONProc/test/newInputSection/tRSPTimeStamp.cc b/RTCP/IONProc/test/newInputSection/tRSPTimeStamp.cc new file mode 100644 index 0000000000000000000000000000000000000000..e4085cf3f05a5bd1d1d7e3697b2541a555bdceac --- /dev/null +++ b/RTCP/IONProc/test/newInputSection/tRSPTimeStamp.cc @@ -0,0 +1,27 @@ +#include <lofar_config.h> + +#include <Common/LofarLogger.h> +#include <Interface/RSPTimeStamp.h> + +int main( int, char **argv ) +{ + INIT_LOGGER(argv[0]); + + unsigned clock = 200 * 1000 * 1000; + + { + TimeStamp ts(0,0,clock); + + for (int64 i = 0; i < clock * 3; ++i) { + ++ts; + + #define REPORT "(ts == " << ts << ", i == " << i << ")" + + ASSERTSTR( (int64)ts == i, REPORT ); + + ASSERTSTR( ts.getSeqId() == i / clock, REPORT ); + + ASSERTSTR( ts.getBlockId() == i % clock, REPORT ); + } + } +} diff --git a/RTCP/IONProc/test/tSSH.cc b/RTCP/IONProc/test/tSSH.cc index 58f6a2a68a592578337d7ed907927f8066e3a3aa..54081d55d521b5725ab87007f349577e1cffba04 100644 --- a/RTCP/IONProc/test/tSSH.cc +++ b/RTCP/IONProc/test/tSSH.cc @@ -60,15 +60,19 @@ int main() { // can we even ssh to localhost? char sshcmd[1024]; - snprintf(sshcmd, sizeof sshcmd, "ssh %s@localhost -i %s echo system success", USER, privkey); + snprintf(sshcmd, sizeof sshcmd, "ssh %s@localhost -o PasswordAuthentication=no -o KeyboardInteractiveAuthentication=no -o NoHostAuthenticationForLocalhost=yes -i %s echo system success", USER, privkey); int ret = system(sshcmd); if (ret < 0 || WEXITSTATUS(ret) != 0) { // no -- mark this test as unrunnable and don't attempt to try with libssh then return 3; } + SSH_Init(); + test_SSHconnection(); test_forkExec(); + SSH_Finalize(); + return 0; } diff --git a/RTCP/Interface/include/Interface/Allocator.h b/RTCP/Interface/include/Interface/Allocator.h index 427471dae36720f2535b0a640a233e1e8e0e9848..fb841218f9469864257fb63690f8d0f21855e069 100644 --- a/RTCP/Interface/include/Interface/Allocator.h +++ b/RTCP/Interface/include/Interface/Allocator.h @@ -48,6 +48,25 @@ class Allocator virtual void *allocate(size_t size, size_t alignment = 1) = 0; virtual void deallocate(void *) = 0; + + /* + * Allows TYPE *foo = allocator.allocateTyped() without type-casting. + */ + class TypedAllocator { + public: + TypedAllocator(Allocator &allocator, size_t alignment): allocator(allocator), alignment(alignment) {} + + // cast-operator overloading is the only way to let C++ automatically deduce the type that we want + // to return. 
+ template<typename T> operator T* () { + return static_cast<T*>(allocator.allocate(sizeof(T), alignment)); + } + private: + Allocator &allocator; + const size_t alignment; + }; + + TypedAllocator allocateTyped(size_t alignment = 1) { return TypedAllocator(*this, alignment); } }; @@ -68,7 +87,7 @@ class SparseSetAllocator : public Allocator public: SparseSetAllocator(const Arena &); - virtual void *allocate(size_t size, size_t alignment); + virtual void *allocate(size_t size, size_t alignment = 1); virtual void deallocate(void *); bool empty() { ScopedLock sl(mutex); return sizes.empty(); } diff --git a/RTCP/Interface/include/Interface/MultiDimArray.h b/RTCP/Interface/include/Interface/MultiDimArray.h index e5a5496180d9a457d2f8c89c9fca771c9f039ee4..1583ec3dde28b0262018c5d20bc51876be4f684a 100644 --- a/RTCP/Interface/include/Interface/MultiDimArray.h +++ b/RTCP/Interface/include/Interface/MultiDimArray.h @@ -32,17 +32,19 @@ template <typename T, unsigned DIM> class MultiDimArray : public boost::multi_ar SuperType(0, boost::detail::multi_array::extent_gen<DIM>()), allocator(&allocator), allocated_num_elements(0), - alignment(0) + alignment(0), + padToAlignment(false), + construct(true) { } - MultiDimArray(const ExtentList &extents, void *ptr) + MultiDimArray(const ExtentList &extents, void *ptr, bool construct = true) : // Use 'placement new' to force initialisation through constructors if T is a class // TODO: Not sure how to handle an exception raised by the constructor of T. The placement // delete[] will be called, but that's an empty stub. - SuperType(new(ptr)T[nrElements(extents)], extents), + SuperType(construct ? new(ptr)T[nrElements(extents)] : ptr, extents), allocator(&allocator), allocated_num_elements(nrElements(extents)), alignment(alignment), @@ -50,26 +52,30 @@ template <typename T, unsigned DIM> class MultiDimArray : public boost::multi_ar { } - MultiDimArray(const ExtentList &extents, size_t alignment = defaultAlignment(), Allocator &allocator = heapAllocator, bool padToAlignment = false) + MultiDimArray(const ExtentList &extents, size_t alignment = defaultAlignment(), Allocator &allocator = heapAllocator, bool padToAlignment = false, bool construct = true) : // Use 'placement new' to force initialisation through constructors if T is a class // TODO: Not sure how to handle an exception raised by the constructor of T. The placement // delete[] will be called, but that's an empty stub. - SuperType(new(allocator.allocate(padToAlignment ? align(nrElements(extents) * sizeof(T), alignment) : nrElements(extents) * sizeof(T), alignment))T[nrElements(extents)], extents), + SuperType(allocate(extents, alignment, allocator, padToAlignment, construct), extents), allocator(&allocator), allocated_num_elements(nrElements(extents)), alignment(alignment), - padToAlignment(padToAlignment) + padToAlignment(padToAlignment), + construct(construct) { } MultiDimArray(const MultiDimArray<T,DIM> &other) : - SuperType(other.num_elements_ ? new(other.allocator->allocate(padToAlignment ? align(other.num_elements_ * sizeof(T), other.alignment) : other.num_elements_ * sizeof(T), other.alignment))T[other.num_elements_] : 0, other.extent_list_), + SuperType(other.num_elements_ ? allocate(other.extent_list_, other.alignment, *other.allocator, other.padToAlignment, other.construct) : 0, other.extent_list_), +//new(other.allocator->allocate(padToAlignment ? 
align(other.num_elements_ * sizeof(T), other.alignment) : other.num_elements_ * sizeof(T), other.alignment))T[other.num_elements_] : 0, other.extent_list_), allocator(other.allocator), allocated_num_elements(other.num_elements_), - alignment(other.alignment) + alignment(other.alignment), + padToAlignment(other.padToAlignment), + construct(true) { *this = other; } @@ -97,11 +103,11 @@ template <typename T, unsigned DIM> class MultiDimArray : public boost::multi_ar return *this; } - void resize(const ExtentList &extents, size_t alignment, Allocator &allocator, bool padToAlignment = false) + void resize(const ExtentList &extents, size_t alignment, Allocator &allocator, bool padToAlignment = false, bool construct = true) { destructElements(); - MultiDimArray newArray(extents, alignment, allocator, padToAlignment); + MultiDimArray newArray(extents, alignment, allocator, padToAlignment, construct); std::swap(this->base_, newArray.base_); std::swap(this->storage_, newArray.storage_); std::swap(this->extent_list_, newArray.extent_list_); @@ -113,6 +119,8 @@ template <typename T, unsigned DIM> class MultiDimArray : public boost::multi_ar std::swap(this->allocator, newArray.allocator); std::swap(this->allocated_num_elements, newArray.allocated_num_elements); std::swap(this->alignment, newArray.alignment); + std::swap(this->padToAlignment, newArray.padToAlignment); + std::swap(this->construct, newArray.construct); } void resize(const ExtentList &extents, size_t alignment = defaultAlignment()) @@ -129,7 +137,7 @@ template <typename T, unsigned DIM> class MultiDimArray : public boost::multi_ar THROW(InterfaceException, "MultiDimArray::resizeInplace: requested to resize to " << new_num_elements << " elements, but only " << allocated_num_elements << " are allocateod"); // only destruct and construct all elements if the number of elements actually changes - if (new_num_elements != this->num_elements_) { + if (new_num_elements != this->num_elements_ && construct) { destructElements(); (void)new(this->base_)T[new_num_elements]; } @@ -186,6 +194,17 @@ template <typename T, unsigned DIM> class MultiDimArray : public boost::multi_ar size_t allocated_num_elements; unsigned alignment; bool padToAlignment; + bool construct; + + T *allocate(const ExtentList &extents, size_t alignment, Allocator &allocator, bool padToAlignment, bool construct) const { + size_t dataSize = padToAlignment + ? align(nrElements(extents) * sizeof(T), alignment) + : nrElements(extents) * sizeof(T); + + T *ptr = static_cast<T*>(allocator.allocate(dataSize, alignment)); + + return construct ? new(ptr)T[nrElements(extents)] : ptr; + } // a MultiDimArray made to replace another, using a different shape. Assumes // the original MultiDimArray allocated enough memory to hold the new @@ -200,13 +219,17 @@ template <typename T, unsigned DIM> class MultiDimArray : public boost::multi_ar allocator(0), // we did not allocate this allocated_num_elements(0), alignment(other.alignment), - padToAlignment(other.padToAlignment) + padToAlignment(other.padToAlignment), + construct(other.construct) { } void destructElements() { + if (!construct) + return; + // explicitly call the destructors in the 'placement new' array since C++ // cannot do this for us. 
The delete[] operator cannot know the size of the // array, and the placement delete[] operator exists (since new()[] will look diff --git a/RTCP/Interface/include/Interface/RSPTimeStamp.h b/RTCP/Interface/include/Interface/RSPTimeStamp.h index 8a605629ee7e39ed7819b1f1d8463706f6d5717b..d2cc0c22c5aab4348f95c383f8dcdb82a82731bd 100644 --- a/RTCP/Interface/include/Interface/RSPTimeStamp.h +++ b/RTCP/Interface/include/Interface/RSPTimeStamp.h @@ -42,6 +42,7 @@ namespace LOFAR { TimeStamp &setStamp(unsigned seqId, unsigned blockId); unsigned getSeqId() const; unsigned getBlockId() const; + unsigned getClock() const { return itsClockSpeed; } template <typename T> TimeStamp &operator += (T increment); template <typename T> TimeStamp &operator -= (T decrement); diff --git a/RTCP/Run/src/locations.sh.in b/RTCP/Run/src/locations.sh.in index c3b571a966463cf12618b564555a9265c44fe961..8954fabecd4f71895ec4248c0741def3a5d631d9 100644 --- a/RTCP/Run/src/locations.sh.in +++ b/RTCP/Run/src/locations.sh.in @@ -42,12 +42,12 @@ then STORAGE=$STORAGE_HOME/production/lofar/bin/Storage_main LOGDIR=/localhome/log - RUNDIR=/globalhome/lofarsystem - LOGBACKUPDIR=/globalhome/lofarsystem/log-archive + RUNDIR=$HOME + LOGBACKUPDIR=$HOME/log-archive EXTRA_KEYS=" OLAP.Storage.userName = lofarsys -OLAP.Storage.sshIdentityFile = /globalhome/lofarsystem/.ssh/id_rsa +OLAP.Storage.sshIdentityFile = /root/.ssh/id_rsa.lofarsys OLAP.Storage.msWriter=$STORAGE OLAP.Storage.AntennaSetsConf = $STORAGE_HOME/production/lofar/etc/AntennaSets.conf OLAP.Storage.AntennaFieldsDir = $STORAGE_HOME/production/lofar/etc/StaticMetaData