From 5face5ca68fd5b01dc3f0c06feed3e575a069c00 Mon Sep 17 00:00:00 2001 From: Matt Hilton Date: Tue, 29 Sep 2020 15:54:21 +0200 Subject: [PATCH 1/2] Changes for other mass defs Fixes for nemoCosmo, nemoMass to use other mass defs. Renamed some functions with M500 in the name. nemoMass still defaults to M500c, will add delta, rhoType recognition in yml file later. --- bin/nemoCosmo | 45 ++++++- bin/nemoMass | 122 ++++++------------ .../checkMassRecovery_M200m.py | 19 +-- .../makeMassFunctionPlotsCCL.py | 3 +- .../makeMassFunctionPlotsCCL_recovered.py | 3 +- nemo/MockSurvey.py | 5 +- nemo/completeness.py | 27 ++-- nemo/signals.py | 18 ++- 8 files changed, 129 insertions(+), 113 deletions(-) diff --git a/bin/nemoCosmo b/bin/nemoCosmo index 0ea7bec..468f547 100644 --- a/bin/nemoCosmo +++ b/bin/nemoCosmo @@ -131,7 +131,7 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey, verbose # Cuts on z, fixed_y_c for forced photometry mode (invalid objects will be listed but without a mass) if row['fixed_y_c'] > 0 and np.isnan(row['redshift']) == False: # Corrected for mass function steepness - massDict=signals.calcM500Fromy0(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, + massDict=signals.calcMass(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, row['redshift'], row['redshiftErr'], tenToA0 = massOptions['tenToA0'], B0 = massOptions['B0'], @@ -185,6 +185,11 @@ if __name__ == '__main__': parser.add_argument("-f", "--footprint", dest = "footprint", help="""Footprint to use, e.g., DES, HSC, KiDS (default: full). Note that the catalog will not be trimmed to match the footprint.""", default = None) + parser.add_argument("-R", "--relativistic-correction", dest = "relativisticCorrection", + action = "store_true", default = False, help = """Apply relativistic correction + to the signal modelling and completeness calculation.""") + parser.add_argument("-D", "--mass-def", dest = "massDef", default = "M500c", help = """Mass + definition to use (e.g., M500c or M200m).""") parser.add_argument("-m", "--max-samples", dest="maxSamples", help="""Maximum number of samples. 
If given, overrides the value given in the Cobaya configuration file.""", type = int, default = defaultMaxSamples) @@ -209,6 +214,15 @@ if __name__ == '__main__': footprintLabel=args.footprint cosmoOutDir=args.cosmoOutDir numToBurn=args.numToBurn + massDef=args.massDef + if massDef == "M500c": + rhoType="critical" + delta=500 + elif massDef == "M200m": + rhoType="matter" + delta=200 + else: + raise Exception("massDef should be either M500c or M200m (use -D M500c or -D M200m)") if args.likelihoodType == "mass": lnprob=lnprob_M500 @@ -266,16 +280,23 @@ if __name__ == '__main__': info['sampler']['mcmc']['max_samples']=maxSamples print("Setting up SNR > %.2f selection function (footprint: %s)" % (SNRCut, footprintLabel)) + if args.relativisticCorrection == True: + print("Relativistic correction will be applied") + else: + print("Relativistic correction neglected") selFn=completeness.SelFn(selFnDir, SNRCut, footprintLabel = footprintLabel, zStep = 0.1, - enableDrawSample = True, mockOversampleFactor = 1) + enableDrawSample = True, mockOversampleFactor = 1, downsampleRMS = True, + applyRelativisticCorrection = args.relativisticCorrection, + delta = delta, rhoType = rhoType) # Cut catalog according to given SNR cut and survey footprint tab=tab[np.where(tab['fixed_SNR'] > SNRCut)] inMask=selFn.checkCoordsInAreaMask(tab['RADeg'], tab['decDeg']) tab=tab[inMask] + print("%d clusters with fixed_SNR > %.1f in footprint %s" % (len(tab), SNRCut, str(footprintLabel))) # NOTE: Since we use such coarse z binning, we completely ignore photo-z errors here - # Otherwise, signals.calcPM500 throws exception when z grid is not fine enough + # Otherwise, signals.calcPMass throws exception when z grid is not fine enough tab['redshiftErr'][:]=0.0 # Define binning on (log10(fixed_y_c), redshift) grid @@ -300,11 +321,25 @@ if __name__ == '__main__': H0, Om0, Ob0, sigma8, ns = 68.0, 0.31, 0.049, 0.81, 0.965 # WebSky cosmology #tenToA0, B0, Mpivot, sigma_int = 2.65e-05, 0.0, 3.0e+14, 0.2 # WebSky scaling relation - assumed scatter - tenToA0, B0, Mpivot, sigma_int = 3.02e-05, 0.0, 3.0e+14, 0.0 # WebSky scaling relation - no scatter + if massDef == 'M500c': + tenToA0, B0, Mpivot, sigma_int = 3.02e-05, 0.0, 3.0e+14, 0.0 # WebSky scaling relation - no scatter, M500c + elif massDef == 'M200m': + tenToA0, B0, Mpivot, sigma_int = 1.7e-05, 0.0, 3.0e+14, 0.0 # WebSky scaling relation - no scatter, M200m #lnprob(H0, Om0, sigma8, Ob0, ns, p, B0, Mpivot, sigma_int) testsToRun=['H0', 'Om0', 'sigma8', 'tenToA0', 'B0', 'sigma_int'] #testsToRun=['tenToA0', 'B0', 'sigma_int'] - print("Running likelihood tests") + # For checking if recovered parameters beat the theoretical max likelihood ones + # If they do, we have bugs to fix... + theoreticalMaxLogLike=lnprob(H0, Om0, sigma8, Ob0, ns, tenToA0, B0, Mpivot, sigma_int) + print("Theoretical max log likelihood = %.3f" % (theoreticalMaxLogLike)) + print("Num clusters expected = %1f" % ((selFn.compMz*selFn.mockSurvey.clusterCount).sum())) + print("Num clusters in catalog = %d" % (len(tab))) + mode=input("Drop into interactive mode [y] or run likelihood tests for each parameter [n] ? 
") + if mode == "y": + import IPython + IPython.embed() + sys.exit() + testLogLike=lnprob(70.5, 0.304, 0.825, Ob0, ns, 2.78e-05, 0.306, Mpivot, 0.0) # Any test parameters if 'H0' in testsToRun: label="H0"; var=H0 pRange=np.linspace(60, 80, 21) diff --git a/bin/nemoMass b/bin/nemoMass index b147365..a7ebbb7 100644 --- a/bin/nemoMass +++ b/bin/nemoMass @@ -97,9 +97,19 @@ def addForcedPhotometry(pathToCatalog, config, zColumnName = None, zErrColumnNam return tab #------------------------------------------------------------------------------------------------------------ -def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict): +def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey): """Calculates masses for cluster data in table. + """ + + label=mockSurvey.mdefLabel + colNames=['%s' % (label), '%sUncorr' % (label)] + if 'rescaleFactor' in massOptions.keys(): + colNames.append('%sCal' % (label)) + for c in colNames: + tab['%s' % (c)]=np.zeros(len(tab)) + tab['%s_errPlus' % (c)]=np.zeros(len(tab)) + tab['%s_errMinus' % (c)]=np.zeros(len(tab)) count=0 for row in tab: @@ -112,7 +122,7 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict): # Cuts on z, fixed_y_c for forced photometry mode (invalid objects will be listed but without a mass) if row['fixed_y_c'] > 0 and np.isnan(row['redshift']) == False: # Corrected for mass function steepness - massDict=signals.calcM500Fromy0(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, + massDict=signals.calcMass(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, row['redshift'], row['redshiftErr'], tenToA0 = massOptions['tenToA0'], B0 = massOptions['B0'], @@ -120,27 +130,13 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict): sigma_int = massOptions['sigma_int'], tckQFit = tckQFitDict[tileName], mockSurvey = mockSurvey, applyMFDebiasCorrection = True, - applyRelativisticCorrection = True, + applyRelativisticCorrection = massOptions['relativisticCorrection'], fRelWeightsDict = fRelWeightsDict[tileName]) - row['M500']=massDict['M500'] - row['M500_errPlus']=massDict['M500_errPlus'] - row['M500_errMinus']=massDict['M500_errMinus'] - # Without relativistic correction (may be useful) - massDict_noRel=signals.calcM500Fromy0(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, - row['redshift'], row['redshiftErr'], - tenToA0 = massOptions['tenToA0'], - B0 = massOptions['B0'], - Mpivot = massOptions['Mpivot'], - sigma_int = massOptions['sigma_int'], - tckQFit = tckQFitDict[tileName], mockSurvey = mockSurvey, - applyMFDebiasCorrection = True, - applyRelativisticCorrection = False, - fRelWeightsDict = fRelWeightsDict[tileName]) - row['M500NoRel']=massDict_noRel['M500'] - row['M500NoRel_errPlus']=massDict_noRel['M500_errPlus'] - row['M500NoRel_errMinus']=massDict_noRel['M500_errMinus'] + row['%s' % (label)]=massDict['%s' % (label)] + row['%s_errPlus' % (label)]=massDict['%s_errPlus' % (label)] + row['%s_errMinus' % (label)]=massDict['%s_errMinus' % (label)] # Uncorrected for mass function steepness - unCorrMassDict=signals.calcM500Fromy0(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, + unCorrMassDict=signals.calcMass(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, row['redshift'], row['redshiftErr'], tenToA0 = massOptions['tenToA0'], B0 = massOptions['B0'], @@ -148,57 +144,28 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict): sigma_int = massOptions['sigma_int'], tckQFit = tckQFitDict[tileName], mockSurvey = mockSurvey, applyMFDebiasCorrection = False, - applyRelativisticCorrection = True, + 
applyRelativisticCorrection = massOptions['relativisticCorrection'], fRelWeightsDict = fRelWeightsDict) - row['M500Uncorr']=unCorrMassDict['M500'] - row['M500Uncorr_errPlus']=unCorrMassDict['M500_errPlus'] - row['M500Uncorr_errMinus']=unCorrMassDict['M500_errMinus'] - # Uncorrected for mass function steepness - without relativistic correction - unCorrMassDict_noRel=signals.calcM500Fromy0(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, - row['redshift'], row['redshiftErr'], - tenToA0 = massOptions['tenToA0'], - B0 = massOptions['B0'], - Mpivot = massOptions['Mpivot'], - sigma_int = massOptions['sigma_int'], - tckQFit = tckQFitDict[tileName], mockSurvey = mockSurvey, - applyMFDebiasCorrection = False, - applyRelativisticCorrection = False, - fRelWeightsDict = fRelWeightsDict) - row['M500UncorrNoRel']=unCorrMassDict_noRel['M500'] - row['M500UncorrNoRel_errPlus']=unCorrMassDict_noRel['M500_errPlus'] - row['M500UncorrNoRel_errMinus']=unCorrMassDict_noRel['M500_errMinus'] + row['%sUncorr' % (label)]=unCorrMassDict['%s' % (label)] + row['%sUncorr_errPlus' % (label)]=unCorrMassDict['%s_errPlus' % (label)] + row['%sUncorr_errMinus' % (label)]=unCorrMassDict['%s_errMinus' % (label)] # Mass conversion - row['M200m']=signals.convertM500cToM200m(massDict['M500']*1e14, row['redshift'])/1e14 - row['M200m_errPlus']=(row['M500_errPlus']/row['M500'])*row['M200m'] - row['M200m_errMinus']=(row['M500_errMinus']/row['M500'])*row['M200m'] - row['M200mUncorr']=signals.convertM500cToM200m(unCorrMassDict['M500']*1e14, row['redshift'])/1e14 - row['M200mUncorr_errPlus']=(row['M500Uncorr_errPlus']/row['M500Uncorr'])*row['M200mUncorr'] - row['M200mUncorr_errMinus']=(row['M500Uncorr_errMinus']/row['M500Uncorr'])*row['M200mUncorr'] - # Mass conversion - no relativistic correction - row['M200mNoRel']=signals.convertM500cToM200m(massDict_noRel['M500']*1e14, row['redshift'])/1e14 - row['M200mNoRel_errPlus']=(row['M500NoRel_errPlus']/row['M500NoRel'])*row['M200mNoRel'] - row['M200mNoRel_errMinus']=(row['M500NoRel_errMinus']/row['M500NoRel'])*row['M200mNoRel'] - row['M200mUncorrNoRel']=signals.convertM500cToM200m(unCorrMassDict_noRel['M500']*1e14, row['redshift'])/1e14 - row['M200mUncorrNoRel_errPlus']=(row['M500UncorrNoRel_errPlus']/row['M500UncorrNoRel'])*row['M200mUncorrNoRel'] - row['M200mUncorrNoRel_errMinus']=(row['M500UncorrNoRel_errMinus']/row['M500UncorrNoRel'])*row['M200mUncorrNoRel'] + #row['M200m']=signals.convertM500cToM200m(massDict['M500']*1e14, row['redshift'])/1e14 + #row['M200m_errPlus']=(row['M500_errPlus']/row['M500'])*row['M200m'] + #row['M200m_errMinus']=(row['M500_errMinus']/row['M500'])*row['M200m'] + #row['M200mUncorr']=signals.convertM500cToM200m(unCorrMassDict['M500']*1e14, row['redshift'])/1e14 + #row['M200mUncorr_errPlus']=(row['M500Uncorr_errPlus']/row['M500Uncorr'])*row['M200mUncorr'] + #row['M200mUncorr_errMinus']=(row['M500Uncorr_errMinus']/row['M500Uncorr'])*row['M200mUncorr'] # Re-scaling (e.g., using richness-based weak-lensing mass calibration) - row['M500Cal']=massDict['M500']/massOptions['rescaleFactor'] - row['M500Cal_errPlus']=np.sqrt(np.power(row['M500_errPlus']/row['M500'], 2) + \ - np.power(massOptions['rescaleFactorErr']/massOptions['rescaleFactor'], 2))*row['M500Cal'] - row['M500Cal_errMinus']=np.sqrt(np.power(row['M500_errMinus']/row['M500'], 2) + \ - np.power(massOptions['rescaleFactorErr']/massOptions['rescaleFactor'], 2))*row['M500Cal'] - row['M200mCal']=signals.convertM500cToM200m(row['M500Cal']*1e14, row['redshift'])/1e14 - 
row['M200mCal_errPlus']=(row['M500Cal_errPlus']/row['M500Cal'])*row['M200mCal'] - row['M200mCal_errMinus']=(row['M500Cal_errMinus']/row['M500Cal'])*row['M200mCal'] - # Re-scaling (e.g., using richness-based weak-lensing mass calibration) - no relativistic correction - row['M500CalNoRel']=massDict_noRel['M500']/massOptions['rescaleFactor'] - row['M500CalNoRel_errPlus']=np.sqrt(np.power(row['M500NoRel_errPlus']/row['M500NoRel'], 2) + \ - np.power(massOptions['rescaleFactorErr']/massOptions['rescaleFactor'], 2))*row['M500CalNoRel'] - row['M500CalNoRel_errMinus']=np.sqrt(np.power(row['M500NoRel_errMinus']/row['M500NoRel'], 2) + \ - np.power(massOptions['rescaleFactorErr']/massOptions['rescaleFactor'], 2))*row['M500CalNoRel'] - row['M200mCalNoRel']=signals.convertM500cToM200m(row['M500CalNoRel']*1e14, row['redshift'])/1e14 - row['M200mCalNoRel_errPlus']=(row['M500CalNoRel_errPlus']/row['M500CalNoRel'])*row['M200mCalNoRel'] - row['M200mCalNoRel_errMinus']=(row['M500CalNoRel_errMinus']/row['M500CalNoRel'])*row['M200mCalNoRel'] + if 'rescaleFactor' in massOptions.keys(): + row['%sCal' % (label)]=massDict['%s' % (label)]/massOptions['rescaleFactor'] + row['%sCal_errPlus' % (label)]=np.sqrt(np.power(row['%s_errPlus' % (label)]/row['%s' % (label)], 2) + \ + np.power(massOptions['rescaleFactorErr']/massOptions['rescaleFactor'], 2))*row['%sCal' % (label)] + row['%sCal_errMinus' % (label)]=np.sqrt(np.power(row['%s_errMinus' % (label)]/row['%s' % (label)], 2) + \ + np.power(massOptions['rescaleFactorErr']/massOptions['rescaleFactor'], 2))*row['%sCal' % (label)] + #row['M200mCal']=signals.convertM500cToM200m(row['M500Cal']*1e14, row['redshift'])/1e14 + #row['M200mCal_errPlus']=(row['M500Cal_errPlus']/row['M500Cal'])*row['M200mCal'] + #row['M200mCal_errMinus']=(row['M500Cal_errMinus']/row['M500Cal'])*row['M200mCal'] return tab @@ -260,7 +227,7 @@ if __name__ == '__main__': nemoTab['redshiftErr']=zTab['redshiftErr'] tab=nemoTab if outFileName is None: - outFileName=optimalCatalogFileName.replace("_optimalCatalog.fits", "_M500.fits") + outFileName=optimalCatalogFileName.replace("_optimalCatalog.fits", "_mass.fits") else: @@ -268,7 +235,7 @@ if __name__ == '__main__': optimalCatalogFileName=catFileName tab=atpy.Table().read(optimalCatalogFileName) if outFileName is None: - outFileName=catFileName.replace(".fits", "_M500.fits") + outFileName=catFileName.replace(".fits", "_mass.fits") # Enter forced photometry mode if we can't find the columns we need # If we're doing forced photometry, we're going to need to set-up so we can find the filtered maps @@ -324,16 +291,7 @@ if __name__ == '__main__': else: ns=0.95 mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns) - - colsToAdd=['M500', 'M500Uncorr', 'M200m', 'M200mUncorr', - 'M500NoRel', 'M500UncorrNoRel', 'M200mNoRel', 'M200mUncorrNoRel'] - if 'rescaleFactor' in list(massOptions.keys()): - colsToAdd=colsToAdd+['M500Cal', 'M200mCal', 'M500CalNoRel', 'M200mCalNoRel'] - for c in colsToAdd: - tab.add_column(atpy.Column(np.zeros(len(tab)), c)) - tab.add_column(atpy.Column(np.zeros(len(tab)), c+"_errPlus")) - tab.add_column(atpy.Column(np.zeros(len(tab)), c+"_errMinus")) - + # Seems like multiprocessing doesn't work under slurm... 
# So use MPI instead: just divide up catalog among processes tab.add_column(atpy.Column(np.arange(len(tab)), "sortIndex")) @@ -345,7 +303,7 @@ if __name__ == '__main__': endIndex=len(tab) tab=tab[startIndex:endIndex] - tab=calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict) + tab=calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey) if config.MPIEnabled == True: tabList=config.comm.gather(tab, root = 0) diff --git a/examples/SOSims/validationScripts/checkMassRecovery_M200m.py b/examples/SOSims/validationScripts/checkMassRecovery_M200m.py index 103a7dd..6410962 100644 --- a/examples/SOSims/validationScripts/checkMassRecovery_M200m.py +++ b/examples/SOSims/validationScripts/checkMassRecovery_M200m.py @@ -20,6 +20,8 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey): """ + label=mockSurvey.mdefLabel + count=0 for row in tab: count=count+1 @@ -31,7 +33,7 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey): # Cuts on z, fixed_y_c for forced photometry mode (invalid objects will be listed but without a mass) if row['fixed_y_c'] > 0 and np.isnan(row['redshift']) == False: # Corrected for mass function steepness - massDict=signals.calcM500Fromy0(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, + massDict=signals.calcMass(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, row['redshift'], row['redshiftErr'], tenToA0 = massOptions['tenToA0'], B0 = massOptions['B0'], @@ -41,9 +43,9 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey): applyMFDebiasCorrection = True, applyRelativisticCorrection = True, fRelWeightsDict = fRelWeightsDict[tileName]) - row['M500']=massDict['M500'] - row['M500_errPlus']=massDict['M500_errPlus'] - row['M500_errMinus']=massDict['M500_errMinus'] + row['%s' % (label)]=massDict['%s' % (label)] + row['%s_errPlus' % (label)]=massDict['%s_errPlus' % (label)] + row['%s_errMinus' % (label)]=massDict['%s_errMinus' % (label)] return tab @@ -58,11 +60,12 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey): H0, Om0, Ob0, sigma8, ns = 68.0, 0.049+0.261, 0.049, 0.81, 0.965 TCMB=2.72548 cosmoModel=FlatLambdaCDM(H0 = H0, Om0 = Om0, Ob0 = Ob0, Tcmb0 = TCMB) -mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns) -massOptions={'tenToA0': 2.65e-05, +mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns, + rhoType = 'matter', delta = 200) +massOptions={'tenToA0': 1.7e-05, 'B0': 0.0, 'Mpivot': 3.0e+14, - 'sigma_int': 0.2} + 'sigma_int': 0.0} tckQFitDict=signals.loadQ("../MFMF_SOSim_3freq_tiles/selFn/QFit.fits") fRelWeightsDict=signals.loadFRelWeights("../MFMF_SOSim_3freq_tiles/selFn/fRelWeights.fits") @@ -112,7 +115,7 @@ def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey): x=fitTab['true_M200'] result=stats.linregress(x, y) sumSqRes=np.sum((x-y)**2) - calibFactor=np.mean(fitTab['true_M500'])/np.mean(fitTab['M500']) + calibFactor=np.mean(fitTab['true_M200'])/np.mean(fitTab['M200m']) # Scaling relation plot plotSettings.update_rcParams() diff --git a/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL.py b/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL.py index 600964f..4f12259 100644 --- a/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL.py +++ b/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL.py @@ -92,7 +92,8 @@ zMin=zBinEdges[i] zMax=zBinEdges[i+1] label='%.1f < z < %.1f' % (zMin, zMax) - 
shellVolumeMpc3=selFn.mockSurvey._comovingVolume(zMax)-selFn.mockSurvey._comovingVolume(zMin) + fSky=selFn.mockSurvey.areaDeg2/(4*np.pi*(180/np.pi)**2) + shellVolumeMpc3=fSky*(selFn.mockSurvey._comovingVolume(zMax)-selFn.mockSurvey._comovingVolume(zMin)) zMask=np.logical_and(selFn.mockSurvey.z >= zMin, selFn.mockSurvey.z < zMax) countsByMass=predMz[zMask, :].sum(axis = 0) diff --git a/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL_recovered.py b/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL_recovered.py index 2b488b4..9e0f61a 100644 --- a/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL_recovered.py +++ b/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL_recovered.py @@ -63,7 +63,8 @@ zMin=zBinEdges[i] zMax=zBinEdges[i+1] label='%.1f < z < %.1f' % (zMin, zMax) - shellVolumeMpc3=selFn.mockSurvey._comovingVolume(zMax)-selFn.mockSurvey._comovingVolume(zMin) + fSky=selFn.mockSurvey.areaDeg2/(4*np.pi*(180/np.pi)**2) + shellVolumeMpc3=fSky*(selFn.mockSurvey._comovingVolume(zMax)-selFn.mockSurvey._comovingVolume(zMin)) zMask=np.logical_and(selFn.mockSurvey.z >= zMin, selFn.mockSurvey.z < zMax) countsByMass=predMz[zMask, :].sum(axis = 0) diff --git a/nemo/MockSurvey.py b/nemo/MockSurvey.py index 4bc73ae..b71d1f4 100644 --- a/nemo/MockSurvey.py +++ b/nemo/MockSurvey.py @@ -110,6 +110,9 @@ def __init__(self, minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns, zSte self.mdef=ccl.halos.MassDef(self.delta, self.rhoType, c_m_relation = c_m_relation) self.transferFunction=transferFunction + # Just for convenience when used elsewhere + self.mdefLabel="M%d%s" % (self.delta, self.rhoType[0]) + self.H0=-1 self.Om0=-1 self.Ob0=-1 @@ -173,7 +176,7 @@ def update(self, H0, Om0, Ob0, sigma8, ns): self.criticalDensity=ccl.physical_constants.RHO_CRITICAL*(self.Ez*self.cosmoModel['h'])**2 for k in range(len(self.z)): # NOTE: Q fit uses theta500, as does fRel (hardcoded M500 - T relation in there) - # This bit here is not be strictly necessary, since we don't need to map on to binning + # This bit here may not be strictly necessary, since we don't need to map on to binning if self.delta != 500 or self.rhoType != "critical": interpLim_minLog10M500c=np.log10(self.mdef.translate_mass(self.cosmoModel, self.M.min(), self.a[k], self._M500cDef)) diff --git a/nemo/completeness.py b/nemo/completeness.py index 087662d..23dd930 100644 --- a/nemo/completeness.py +++ b/nemo/completeness.py @@ -76,8 +76,8 @@ class SelFn(object): def __init__(self, selFnDir, SNRCut, configFileName = None, footprintLabel = None, zStep = 0.01, tileNames = None, enableDrawSample = False, mockOversampleFactor = 1.0, - downsampleRMS = True, applyMFDebiasCorrection = True, setUpAreaMask = False, - enableCompletenessCalc = True, delta = 500, rhoType = 'critical'): + downsampleRMS = True, applyMFDebiasCorrection = True, applyRelativisticCorrection = True, + setUpAreaMask = False, enableCompletenessCalc = True, delta = 500, rhoType = 'critical'): """Initialise an object that contains a survey selection function. This class uses the output in the selFn/ directory (produced by the nemo and nemoSelFn commands) to @@ -107,6 +107,8 @@ def __init__(self, selFnDir, SNRCut, configFileName = None, footprintLabel = Non versus survey area. Downsampling speeds up completeness calculations considerably. applyMFDebiasCorrection (bool, optional): Set to False to disable the Eddington bias correction of mass estimates. Probably only useful for debugging. 
+ applyRelativisticCorrection (bool, optional): Set to False to disable inclusion of relativistic + correction in completeness calculations. setupAreaMask (bool, optional): If True, read in the area masks so that quick position checks can be done (e.g., by checkCoordsAreInMask). enableCompletenessCalc (bool, optional): If True, set up the machinery needed to do completeness @@ -118,6 +120,7 @@ def __init__(self, selFnDir, SNRCut, configFileName = None, footprintLabel = Non self.footprintLabel=footprintLabel self.downsampleRMS=downsampleRMS self.applyMFDebiasCorrection=applyMFDebiasCorrection + self.applyRelativisticCorrection=applyRelativisticCorrection self.selFnDir=selFnDir self.zStep=zStep @@ -347,12 +350,20 @@ def update(self, H0, Om0, Ob0, sigma8, ns, scalingRelationDict = None): for i in range(len(zRange)): zk=zRange[i] k=np.argmin(abs(self.mockSurvey.z-zk)) - theta500s_zk=interpolate.splev(self.mockSurvey.log10M, self.mockSurvey.theta500Splines[k]) + if self.mockSurvey.delta != 500 or self.mockSurvey.rhoType != "critical": + log10M500s=np.log10(self.mockSurvey.mdef.translate_mass(self.mockSurvey.cosmoModel, + self.mockSurvey.M, + self.mockSurvey.a[k], + self.mockSurvey._M500cDef)) + else: + log10M500s=self.mockSurvey.log10M + theta500s_zk=interpolate.splev(log10M500s, self.mockSurvey.theta500Splines[k]) Qs_zk=interpolate.splev(theta500s_zk, self.tckQFitDict[tileName]) - fRels_zk=interpolate.splev(self.mockSurvey.log10M, self.mockSurvey.fRelSplines[k]) true_y0s_zk=tenToA0*np.power(self.mockSurvey.Ez[k], 2)*np.power(np.power(10, self.mockSurvey.log10M)/Mpivot, - 1+B0)*Qs_zk*fRels_zk - #true_y0s_zk=tenToA0*np.power(mockSurvey.Ez[k], 2)*np.power((recMassBias*np.power(10, mockSurvey.log10M))/Mpivot, 1+B0)*Qs_zk*fRels_zk + 1+B0)*Qs_zk + if self.applyRelativisticCorrection == True: + fRels_zk=interpolate.splev(log10M500s, self.mockSurvey.fRelSplines[k]) + true_y0s_zk=true_y0s_zk*fRels_zk y0Grid[i]=true_y0s_zk # For some cosmological parameters, we can still get the odd -ve y0 @@ -409,7 +420,7 @@ def projectCatalogToMz(self, tab): zErr=row['redshiftErr'] y0=row['fixed_y_c']*1e-4 y0Err=row['fixed_err_y_c']*1e-4 - P=signals.calcPM500(y0, y0Err, z, zErr, self.tckQFitDict[tileName], self.mockSurvey, + P=signals.calcPMass(y0, y0Err, z, zErr, self.tckQFitDict[tileName], self.mockSurvey, tenToA0 = tenToA0, B0 = B0, Mpivot = Mpivot, sigma_int = sigma_int, applyMFDebiasCorrection = self.applyMFDebiasCorrection, fRelWeightsDict = self.fRelDict[tileName], @@ -439,7 +450,7 @@ def projectCatalogToMz_simple(self, tab): zErr=row['redshiftErr'] y0=row['fixed_y_c']*1e-4 y0Err=row['fixed_err_y_c']*1e-4 - massDict=signals.calcM500Fromy0(y0, y0Err, z, zErr, tenToA0 = tenToA0, B0 = B0, Mpivot = Mpivot, + massDict=signals.calcMass(y0, y0Err, z, zErr, tenToA0 = tenToA0, B0 = B0, Mpivot = Mpivot, sigma_int = sigma_int, tckQFit = self.tckQFitDict[tileName], mockSurvey = self.mockSurvey, applyMFDebiasCorrection = self.applyMFDebiasCorrection, diff --git a/nemo/signals.py b/nemo/signals.py index d0ba8ae..389a58a 100644 --- a/nemo/signals.py +++ b/nemo/signals.py @@ -699,6 +699,8 @@ def y0FromLogM500(log10M500, z, tckQFit, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = Returns y0~, theta500Arcmin, Q + NOTE: Depreciated? Nothing we have uses this. 
+ """ if type(Mpivot) == str: @@ -727,9 +729,9 @@ def y0FromLogM500(log10M500, z, tckQFit, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = return y0pred, theta500Arcmin, Q #------------------------------------------------------------------------------------------------------------ -def calcM500Fromy0(y0, y0Err, z, zErr, tckQFit, mockSurvey, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = 3e14, - sigma_int = 0.2, applyMFDebiasCorrection = True, applyRelativisticCorrection = True, - calcErrors = True, fRelWeightsDict = {148.0: 1.0}): +def calcMass(y0, y0Err, z, zErr, tckQFit, mockSurvey, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = 3e14, + sigma_int = 0.2, applyMFDebiasCorrection = True, applyRelativisticCorrection = True, + calcErrors = True, fRelWeightsDict = {148.0: 1.0}): """Returns M500 +/- errors in units of 10^14 MSun, calculated assuming a y0 - M relation (default values assume UPP scaling relation from Arnaud et al. 2010), taking into account the steepness of the mass function. The approach followed is described in H13, Section 3.2. @@ -759,16 +761,18 @@ def calcM500Fromy0(y0, y0Err, z, zErr, tckQFit, mockSurvey, tenToA0 = 4.95e-5, B if y0 > 1e-2: raise Exception('y0 is suspiciously large - probably you need to multiply by 1e-4') - P=calcPM500(y0, y0Err, z, zErr, tckQFit, mockSurvey, tenToA0 = tenToA0, B0 = B0, Mpivot = Mpivot, + P=calcPMass(y0, y0Err, z, zErr, tckQFit, mockSurvey, tenToA0 = tenToA0, B0 = B0, Mpivot = Mpivot, sigma_int = sigma_int, applyMFDebiasCorrection = applyMFDebiasCorrection, applyRelativisticCorrection = applyRelativisticCorrection, fRelWeightsDict = fRelWeightsDict) M500, errM500Minus, errM500Plus=getM500FromP(P, mockSurvey.log10M, calcErrors = calcErrors) - return {'M500': M500, 'M500_errPlus': errM500Plus, 'M500_errMinus': errM500Minus} + label=mockSurvey.mdefLabel + + return {'%s' % (label): M500, '%s_errPlus' % (label): errM500Plus, '%s_errMinus' % (label): errM500Minus} #------------------------------------------------------------------------------------------------------------ -def calcPM500(y0, y0Err, z, zErr, tckQFit, mockSurvey, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = 3e14, +def calcPMass(y0, y0Err, z, zErr, tckQFit, mockSurvey, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = 3e14, sigma_int = 0.2, applyMFDebiasCorrection = True, applyRelativisticCorrection = True, fRelWeightsDict = {148.0: 1.0}, return2D = False): """Calculates P(M500) assuming a y0 - M relation (default values assume UPP scaling relation from Arnaud @@ -776,7 +780,7 @@ def calcPM500(y0, y0Err, z, zErr, tckQFit, mockSurvey, tenToA0 = 4.95e-5, B0 = 0 in H13, Section 3.2. The binning for P(M500) is set according to the given mockSurvey, as are the assumed cosmological parameters. - This routine is used by calcM500Fromy0. + This routine is used by calcMass. If return2D == True, returns a grid of same dimensions / binning as mockSurvey.z, mockSurvey.log10M, normalised such that the sum of the values is 1. From fd92446b4a64a5bc2e49c3e9959451ec2edffaf7 Mon Sep 17 00:00:00 2001 From: Matt Hilton Date: Thu, 1 Oct 2020 11:42:21 +0200 Subject: [PATCH 2/2] Fix for two pass automasking extended sources There was a typo! It now does what it was supposed to. 
--- nemo/maps.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nemo/maps.py b/nemo/maps.py index 6948994..37e4473 100644 --- a/nemo/maps.py +++ b/nemo/maps.py @@ -1018,10 +1018,10 @@ def preprocessMapDict(mapDict, tileName = 'PRIMARY', diagnosticsDir = None): ASizeArcmin=row['ellipse_A']/xPixSizeArcmin if ASizeArcmin > maskRadiusArcmin: extendedSource=True - masksRadiusArcmin=ASizeArcmin + maskRadiusArcmin=ASizeArcmin rArcminMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(rArcminMap, wcs, - row['RADeg'], row['decDeg'], - maskRadiusArcmin/60) + row['RADeg'], row['decDeg'], + maskRadiusArcmin/60) rArcminMap=rArcminMap*60 surveyMask[rArcminMap < maskRadiusArcmin]=0 if extendedSource == True: