Commit bc31724f authored by Maude Le Jeune

remove seg_ + demo cmb

parent 248a2150
......@@ -88,7 +88,7 @@ class EnvironmentBase():
""" Return the log file name
"""
return self._get_data_fn("seg_"+self._worker.task.seg+".log")
return self._get_data_fn(self._worker.task.seg+".log")
def _hook(self, hook_name, glo):
""" Execute hook code.
......
......@@ -117,7 +117,7 @@ class Pipeline:
if code_dir:
self.repository = LocalRepository(code_dir)
## string, indicates where to save the pipeline products.
self._prefix = prefix
self._prefix = path.realpath(prefix)
if not os.path.exists(prefix):
os.mkdir(prefix)
logdir = os.path.join(prefix, "log")
......@@ -131,6 +131,8 @@ class Pipeline:
self.compute_hash()
## string, sql data base
if sqlfile is None:
sqlfile = path.join(prefix, ".sqlstatus")
self.sqlfile=path.expanduser(path.expandvars(sqlfile))
## boolean, if true, turn the matplotlib backend to Agg
......@@ -201,18 +203,25 @@ class Pipeline:
_edge = re.compile('(\w+)\s*->\s*(\w+)')
c=0
a = _edge.search(s)
sp = '->'
if not a:
_edge = re.compile('(\w+)\s*;')
a = _edge.search(s)
sp = ';'
r = []
while(a):
r.append(a.groups())
c = s.find('->',a.start())
c = s.find(sp,a.start())
a = _edge.search(s,c)
segs = set(flatten(r))
print r
print segs
self._parents=dict([(seg,[]) for seg in segs])
self._children=dict([(seg,[]) for seg in segs])
for c in r:
self._parents[c[1]].append(c[0])
self._children[c[0]].append(c[1])
if len(c)>1:
self._parents[c[1]].append(c[0])
self._children[c[0]].append(c[1])
def strseg(self, seg):
""" Convert a segment to a string.
......@@ -386,7 +395,7 @@ class Pipeline:
s = self.repository.get_all_string(seg)
h = get_hashkey(s, hash).base32_digest()
self._hashes[seg] = h
currdir = path.join(currdir, 'seg_%s_%s'%(seg,h))
currdir = path.join(currdir, '%s_%s'%(seg,h))
self._curr_dirs[seg] = currdir
return h, currdir
......@@ -480,7 +489,7 @@ class Pipeline:
-------
string, segment directory.
"""
return path.join(self.get_curr_dir(seg),'seg_%s.args'%seg)
return path.join(self.get_curr_dir(seg),'%s.args'%seg)
def get_tag_file(self, seg):
""" Return the segment directory.
......@@ -524,7 +533,7 @@ class Pipeline:
def get_log_file (self, seg):
""" Return the segment log filename.
"""
return path.join(self.get_curr_dir(seg),'seg_%s.log'%seg)
return path.join(self.get_curr_dir(seg),'%s.log'%seg)
def get_meta_file (self, seg, prod=-1):
""" Return the meta data filename
......@@ -533,7 +542,7 @@ class Pipeline:
dirname = self.get_curr_dir(seg)
else:
dirname = self.get_data_dir(seg, prod)
return path.join(dirname,'seg_%s.meta'%seg)
return path.join(dirname,'%s.meta'%seg)
......
......@@ -238,13 +238,13 @@ class LocalRepository(Repository):
"""
try:
f = [filter(self._ext_filter,
glob(path.join(self.src_path, 'seg_%s_code.*'%seg)))[0]]
glob(path.join(self.src_path, '%s.*'%seg)))[0]]
except:
f = []
if len(f) == 0:
try:
f = [filter(self._ext_filter,
glob(path.join(self.lib_path, 'seg_%s_code.*'%seg)))[0]]
glob(path.join(self.lib_path, '%s.*'%seg)))[0]]
except:
f = []
if len(f) == 0:
......@@ -276,13 +276,13 @@ class LocalRepository(Repository):
"""
try:
f = [filter(self._ext_filter,
glob(path.join(self.src_path, 'seg_%s_%s.*'%(seg,hook))))[0]]
glob(path.join(self.src_path, '%s_%s.*'%(seg,hook))))[0]]
except:
f = []
if len(f) == 0:
try:
f = [filter(self._ext_filter,
glob(path.join(self.lib_path, 'seg_%s_%s.*'%(seg,hook))))[0]]
glob(path.join(self.lib_path, '%s_%s.*'%(seg,hook))))[0]]
except:
f = []
if len(f) == 0:
......
""" This is the docstring of the segment script.
This segment script illustrates how to use the various utilities
available from the default environment.
"""
### How to organize the segment script?
###
### A segment script contains Python code which is applied to some
### data set.
### The pipelet software makes no demands on the code, but it
### offers many utilities which encourage good practice.
###
### From the pipeline point of view, the segment script corresponds to
### some processing applied to the data (input/output) with some tuned
### parameters.
### The pipelet software offers default utilities for these
### three entities:
### - processing,
### - data,
### - parameters.
### The data and file utilities:
###
### Data are usually stored in files.
### The pipelet software offers a specific location for all data files,
### which can be browsed from the web interface.
### To get one of those files:
output_fn = get_data_fn ('mydata.dat')
### To retrieve some data file from an upstream segment named 'first':
input_fn = glob_seg ('*.dat', 'first')[0]
### One may also need some temporary files:
temporary_fn = get_tmp_fn()
### To read and write data files, one may use the default routines
### based on the python pickle module:
load_products (input_fn , globals(), ["D"])
save_products (output_fn, globals(), ["D"])
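### As a concrete round trip (file and variable names hypothetical),
### the two helpers pair up across segments, e.g.:
##   D = {"dim": 3}                                    ## some product
##   save_products(get_data_fn("stage.dat"), globals(), ["D"])
##   ## ...and in a downstream segment:
##   load_products(glob_seg("stage.dat", "first")[0], globals(), ["D"])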
### The parameter utilities:
###
### The pipelet software automatically saves all parameters listed in
### the variable named 'lst_par'.
### Some of them can be made visible from the web interface using the
### variable 'lst_tag':
lst_par = ['my_dim', 'my_option', 'my_nb_of_iter']
lst_tag = ['my_dim', 'my_nb_of_iter']
### To retrieve parameters from an upstream segment:
load_param ('first', globals(), ['my_previous_option'])
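### After this call, the parameter behaves as a plain variable in this
### script, e.g. (a hedged sketch):
##   if my_previous_option:   ## injected into globals() by load_param
##       log.info("inherited option from segment 'first'")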
### The processing utilities:
###
### A standard logging.Logger object is available from the segment
### context.
log.info("Until now, nothing's wrong\n")
### It may occur that the processing applied to the data contains some
### generic code you want to recycle from one segment to another.
### This portion of code can be factorized into a hook script named
### 'segname_hookname.py', which can be called with:
hook ('preproc', globals())
### It is also possible to call an arbitrary subprocess without
### losing the log facility:
logged_subprocess(['my_sub_process', 'my_dim', 'my_nb_of_iter'])
### The pipe scheme utilities:
###
### The pipe scheme can also be controlled from the segment
### environment. The default behaviour is set by the segment input and
### output variables:
output = [1,2,3] ## will generate 3 instances of the downstream
                 ## segment; each of them will receive one element of
                 ## the list as input.
output = input   ## will generate 1 instance of the downstream
                 ## segment per current instance.
output = None    ## will generate 1 instance of the downstream
                 ## segment for all current instances.
### This default behaviour can be altered by specifying an @multiplex
### directive (see documentation).
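### To make the wiring concrete, a hedged sketch of two segment
### scripts (segment names hypothetical):
##   first.py -- upstream segment:
##     output = [1, 2, 3]          ## spawns three downstream instances
##   second.py -- downstream segment:
##     log.info("got input = %s" % input)  ## receives 1, 2 or 3
##     output = input * 2                  ## forward a derived value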
......@@ -297,14 +297,14 @@ def create_pipe(pipename, prefix=[]):
fn = os.path.join(code_dir, "main.py")
with closing(file(fn, 'w')) as f:
f.write(str)
if not os.path.exists(os.path.join(code_dir, "seg_default_code.py")):
shutil.copy(current_dir+"/static/seg_default_code.py", code_dir) ## make a segment file with some doc
if not os.path.exists(os.path.join(code_dir, "default.py")):
shutil.copy(current_dir+"/static/default.py", code_dir) ## make a segment file with some doc
print "\n\n-----------------------------------------------------------"
print " Pipeline '%s' has been successfully created "%pipename
print "-----------------------------------------------------------\n\n"
print "1- Change directory to '%s' to set your pipeline scheme\n in the file named 'main.py'\n"%code_dir
print "2- Rename and edit the 'seg_default_code.py' to set the segments content.\n"
print "2- Rename and edit the 'default.py' to set the segments content.\n"
print "3- Run 'python main.py -d' to enter the debugging execution mode."
print "Type 'python main.py --help' to get the full list of options.\n"
print "4- Run 'pipeweb track %s %s/.sqlstatus' \n to add the pipe to the web interface.\n"%(pipename, prefix)
......
......@@ -125,7 +125,7 @@ class Web:
lst_tag = list(set(lst_tag))
str_tag = ";".join(lst_tag)
conn.execute('update segments set tag=? where seg_id=?',(str_tag,segid))
fn = glob(os.path.join(l[1], "seg_*.meta"))
fn = glob(os.path.join(l[1], "*.meta"))
self._update_meta(fn, str_tag)
raise cherrypy.HTTPRedirect('/'+self.name+'/',303)
......@@ -161,7 +161,7 @@ class Web:
lst_tag.remove(tag)
str_tag = ";".join(lst_tag)
conn.execute('update segments set tag=? where seg_id=?',(str_tag,s[0]))
fn = glob(os.path.join(s[2], "seg_*.meta"))
fn = glob(os.path.join(s[2], "*.meta"))
self._update_meta(fn, str_tag)
raise cherrypy.HTTPRedirect('/'+self.name+'/',303)
......
......@@ -6,6 +6,7 @@ from pipelet.launchers import launch_interactive, launch_process, launch_pbs
import os
import sys
import logging
import os.path as path
### Pipeline properties
......@@ -29,15 +30,13 @@ import logging
### executed first is not defined.
### See doc for more details.
###
# pipe_dot = """
# 'cmb', 'inw', 'plot';
# 'noise', 'inw';
# 'noise', 'clnoise', 'plot';
# """
pipe_dot = """
'cmb';
"""
nside = 512
pipe_dot = """
cmb->cls->clplot;
mask->cls;
"""
### code_dir
### This path corresponds to the directory where the segment python scripts are found.
......@@ -97,6 +96,7 @@ def main(print_info=print_info):
## Build pipeline instance
P = Pipeline(pipe_dot, code_dir=code_dir, prefix=prefix, matplotlib=True, matplotlib_interactive=True)
P.push(cmb=[nside], mask=[nside])
## Interactive mode
if options.debug:
......
""" seg_cmb_code.py
Generate a cmb map from lambda-CDM cl.
"""
import healpy as hp
import pylab as pl
### Define some global parameters
lst_par = ['lmax', 'nside', 'cmb_unit', 'seed']
lst_tag = lst_par
lmax = 500
nside = 256
cmb_unit = "mK_CMB"
seed = 0 ## actually not used by synfast
### Generate a cmb map
cmb_cl = pl.loadtxt("lambda_best_fit.txt")[0:lmax+1,0]
cmb_map = hp.synfast(cmb_cl, nside, lmax=lmax)
### Save it to disk
cmb_cl_fn = get_data_fn ('cls_cmb.txt')
cmb_map_fn = get_data_fn ('map_cmb.fits')
hp.write_map(cmb_map_fn, cmb_map)
pl.savetxt (cmb_cl_fn , cmb_cl)
### Make a plot
cmb_map_fig = cmb_map_fn.replace('.fits', '.png')
hp.mollview(cmb_map)
pl.savefig (cmb_map_fig)
### Set output
output = [seed]
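The demo wires this segment into cmb->cls->clplot (with mask->cls as a second parent); a downstream 'cls' segment might consume these products roughly like this (a hedged sketch, not part of the commit):

import healpy as hp

## pull the upstream parameters and the saved map, then compute spectra
load_param('cmb', globals(), ['lmax', 'nside'])
cmb_map = hp.read_map(glob_seg('map_cmb.fits', 'cmb')[0])
cls = hp.anafast(cmb_map, lmax=lmax)
save_products(get_data_fn('cls.dat'), globals(), ['cls'])
output = input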
""" This is the docstring of the segment script.
This segment script illustrates how to use the various utilities
available from the default environment.
"""
### How to organize the segment script ?
###
### A segment script contains some python code which will be applied on
### some data set.
### The pipelet software has no demand on the code, but it
### offers a lot of utilities which may set the user straight
### about some good practice.
###
### From the pipeline point of view the segment script corresponds to
### some processing applied to the data (input/output) with some tuned parameters.
### The pipelet software offers some default utilities with respect of that
### 3 entities :
### - processing,
### - data,
### - parameters.
### The data and files utilities :
###
### Data are usually stored on files.
### The pipelet software offers a specific location for all data files
### that can be parsed from the web interface.
### To get one on those files :
output_fn = get_data_fn ('mydata.dat')
### To retrieve some data file from an upstream segment named 'first':
input_fn = glob_seg ('*.dat', 'first')[0]
### One may need some temporary files also :
temporary_fn = get_tmp_fn()
### To read and write data to files, one may use some default routines
### based on the pickle python module :
load_products (input_fn , globals(), ["D"])
save_products (output_fn, globals(), ["D"])
### The parameters utilities :
###
### The pipelet software saves automatically all parameters listed in
### the variable named 'lst_par'.
### Some of them can be made visible from the interface using the
### variable 'lst_tag'
lst_par = ['my_dim', 'my_option', 'my_nb_of_iter']
lst_tag = ['my_dim', 'my_nb_of_iter']
### To retrieve some parameters of an upstream segment :
load_param ('first', globals(), ['my_previous_option'])
### The processing utilities :
###
### A standard logging.Logger object is available from the segment
### context.
log.info("Until now, nothing's wrong\n")
### It may occur that the processing applied to the data contains some
### generic code you want to recycle from one segment to another.
### This portion of code can be factorized in a hook script names
### 'seg_segname_hookname.py' which can be called with:
hook ('preproc', globals())
### It is also possible to call an arbitrary subprocess without
### loosing the log facility:
logged_subprocess(['my_sub_process', 'my_dim', 'my_nb_of_iter'])
### The pipe scheme utilities :
###
### The pipe scheme can be controlled from the segment environment
### also. The default behaviour is set by the segment input and
### output variables :
output = [1,2,3] ## will generate 3 instances of the downstream
## segment. Each of them will receive one element of
## the list as input.
output = input ## will generate 1 instance of the downstream
## segment, per current instance.
output = None ## will generate 1 instance of the downstream
## segment for all current instance.
### This default behavious can be altered by specifying an @multiplex
### directive (see documentation).