```python
from psana import DataSource
from psmon import publish
from psmon.plots import Image, XYPlot
import os, sys, time
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

os.environ['PS_SRV_NODES'] = '1'
os.environ['PS_SMD_N_EVENTS'] = '1'

# hack for use with "mpirun -n 5 python andor.py" to eliminate use of
# publish.local (allows use of "psplot ANDOR")
if rank == 4:
    # rank of the SRV node when running with 5 cores.
    # Mona probably has a more elegant way to do this.
    publish.init()

# exp and runnum are passed on the command line
exp = sys.argv[1]
runnum = int(sys.argv[2])

mount_dir = '/sdf/data/lcls/drpsrcf/ffb'  # for s3df
#mount_dir = '/cds/data/drpsrcf'          # for srcf
xtc_dir = os.path.join(mount_dir, exp[:3], exp, 'xtc')

ds = DataSource(exp=exp, run=runnum, dir=xtc_dir, intg_det='andor_vls',
                batch_size=1, psmon_publish=publish)

# we will remove this for batch processing and use "psplot" instead
# publish.local = True

def my_smalldata(data_dict):
    if 'unaligned_andor_norm' in data_dict:
        andor_norm = data_dict['unaligned_andor_norm'][0]
        myplot = XYPlot(0, f"Andor (normalized) run:{runnum}",
                        range(len(andor_norm)), andor_norm)
        publish.send('ANDOR', myplot)
    if 'sum_atmopal' in data_dict:
        atmopal_sum = data_dict['sum_atmopal']
        myplot = XYPlot(0, f"Atmopal (sum) run:{runnum}",
                        range(len(atmopal_sum)), atmopal_sum)
        publish.send('ATMOPAL', myplot)

for myrun in ds.runs():
    andor = myrun.Detector('andor_vls')
    atmopal = myrun.Detector('atmopal')
    timing = myrun.Detector('timing')
    smd = ds.smalldata(filename='mysmallh5.h5', batch_size=1000,
                       callbacks=[my_smalldata])
    norm = 0
    ndrop_inhibit = 0
    sum_atmopal = None
    for nstep, step in enumerate(myrun.steps()):
        print('step:', nstep)
        for nevt, evt in enumerate(step.events()):
            andor_img = andor.raw.value(evt)
            atmopal_img = atmopal.raw.image(evt)
            if atmopal_img is not None:
                if sum_atmopal is None:
                    sum_atmopal = atmopal_img[0, :]
                else:
                    sum_atmopal += atmopal_img[0, :]
            # also need to check for events missing due to damage
            # (or compare against expected number of events)
            ndrop_inhibit += timing.raw.inhibitCounts(evt)
            smd.event(evt, mydata=nevt)  # high-rate data saved to h5
            # need to check Matt's new timing-system data on every
            # event to make sure we haven't missed normalization
            # data due to deadtime
            norm += nevt  # fake normalization
            if andor_img is not None:
                print('andor data on evt:', nevt, 'ndrop_inhibit:', ndrop_inhibit)
                # check that the high-rate readout group (2) didn't
                # miss any events due to deadtime
                if ndrop_inhibit[2] != 0:
                    print('*** data lost due to deadtime')
                # need to prefix the name with "unaligned_" so
                # the low-rate andor dataset doesn't get padded
                # to align with the high-rate datasets
                smd.event(evt, mydata=nevt,
                          unaligned_andor_norm=(andor_img / norm),
                          sum_atmopal=sum_atmopal)
                norm = 0
                ndrop_inhibit = 0
                sum_atmopal = None
```
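To run interactively, launch with one SRV core for the smalldata callbacks, e.g. `mpirun -n 5 python andor.py <exp> <runnum>` (the `rank == 4` check above assumes 5 cores, with the last acting as the SRV node), then attach a plot client from another terminal with `psplot ANDOR` and, presumably, `psplot ATMOPAL` for the second published plot.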
goal:
- run a batch job in s3df
- get real-time plots
- for the latest run by default (see the sketch after this list)
- if users go back and reanalyze old runs, that would be a user-selectable option
- could use X-forwarding from s3df to start, but ideally would use the open ports we requested (the latter may be stalled)
- timescale: 2 months would be nice, 3 or 4 months is probably OK
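
A minimal sketch of the "latest run by default" behavior, assuming the standard LCLS2 xtc2 file naming (`<exp>-r<runnum>-s<stream>-c<chunk>.xtc2`); the `latest_run` helper and the optional command-line argument are hypothetical, not part of the script above:

```python
import glob, os, re, sys

def latest_run(xtc_dir, exp):
    """Hypothetical helper: infer the newest run from xtc2 file names,
    assuming standard LCLS2 naming, e.g. rixx1003821-r0046-s000-c000.xtc2."""
    runs = []
    for path in glob.glob(os.path.join(xtc_dir, f'{exp}-r*.xtc2')):
        m = re.search(r'-r(\d+)-', os.path.basename(path))
        if m:
            runs.append(int(m.group(1)))
    if not runs:
        raise FileNotFoundError(f'no xtc2 files found in {xtc_dir}')
    return max(runs)

# default to the latest run; reanalyzing an old run is user-selectable
# by passing an explicit run number as the second argument
exp = sys.argv[1]
mount_dir = '/sdf/data/lcls/drpsrcf/ffb'
xtc_dir = os.path.join(mount_dir, exp[:3], exp, 'xtc')
runnum = int(sys.argv[2]) if len(sys.argv) > 2 else latest_run(xtc_dir, exp)
```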