Commit 30020622 authored by Maude Le Jeune

test clean up

parent 4cd4ba53
@@ -3,8 +3,10 @@
def main():
    import glob
    import os
    eps = glob.glob("./*.eps")
    for f in eps:
        os.system("epstopdf %s"%f)
    dia = glob.glob("./*.dia")
    for f in dia:
        eps = f.replace(".dia", ".eps")
        os.system("dia -e %s %s"%(eps,f))
        os.system("epstopdf %s"%eps)

if __name__ == "__main__":
    main()
@@ -285,7 +285,7 @@ def create_pipe(pipename, prefix=[]):
    if not os.path.exists(code_dir):
        os.mkdir(code_dir)
    ## make prefix and log dir
    ## set prefix dir
    if not prefix:
        prefix = code_dir
""" Example main illustrating various ways of launching jobs.
"""
from pipelet.pipeline import Pipeline
from pipelet.launchers import launch_interactive, launch_process, launch_pbs
import os
import sys
import logging
### Pipeline properties
### pipe dot scheme
### The dependencies between segments must form a directed acyclic
### graph. This graph is described by a character string using a subset of the
### graphviz dot language (http://www.graphviz.org). For example, the string:
### """
### a -> b -> d;
### c -> d;
### c -> e;
### """
### defines a pipeline with 5 segments {"a", "b", "c", "d", "e"}. The
### relation "a->b" ensures that the processing of segment "a" will be
### done before the processing of its child segment "b". Also, the output
### of "a" will be fed as input to "b". In the given example, the node
### "d" has two parents, "b" and "c". Both will be executed before "d". As
### there is no relation between "b" and "c", which of the two will be
### executed first is not defined.
### See doc for more details.
###
# pipe_dot = """
# 'cmb', 'inw', 'plot';
# 'noise', 'inw';
# 'noise', 'clnoise', 'plot';
# """
pipe_dot = """
'cmb';
"""
### code_dir
### This path corresponds to the directory where the segment python scripts are found.
### One may choose a location which is regularly backed up
code_dir = './'
### prefix
### This path corresponds to the directory where the pipeline products will be stored.
### One may choose a location with large disk space and no backup.
prefix = './cmb'
### log_level
### Set the log level which will be displayed on stdout.
### Logs are also written in pipelet log files with level set to DEBUG.
### Log files are available from the web interface.
log_level = logging.DEBUG
### job_name
### This string will be used in the PBS mode to tag all pipeline jobs.
job_name = 'pipelet'
### job_header
### This string will be put at the beginning of each PBS job.
### Error and log files will be available from the interface along with
### the other log files.
job_header = """
#!/bin/bash
echo $PYTHONPATH
"""
### cpu_time
### Maximum cpu time used in PBS mode.
cpu_time = "00:30:00"
### print_info
### Print additional information after running, explaining how to access the web interface.
print_info = True
def main(print_info=print_info):
    """ Run the pipeline.
    """
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('-a', '--add-workers', metavar='N',
                      help='Submit N supplementary jobs without launching a new server.', type='int')
    parser.add_option('-d', '--debug',
                      help='Start jobs in interactive mode', action="store_true", default=False)
    parser.add_option('-p', '--process', metavar='N',
                      help='Launch jobs as local parallel processes', type='int')
    (options, args) = parser.parse_args()

    ## Build pipeline instance
    P = Pipeline(pipe_dot, code_dir=code_dir, prefix=prefix, matplotlib=True, matplotlib_interactive=True)

    ## Interactive mode
    if options.debug:
        w, t = launch_interactive(P, log_level=log_level)
        w.run()
    ## Process mode
    elif options.process:
        launch_process(P, options.process, log_level=log_level)
    ## PBS mode
    elif options.add_workers:
        launch_pbs(P, options.add_workers, address=(os.environ['HOST'], 50000), job_name=job_name, cpu_time=cpu_time, job_header=job_header)
    else:
        launch_pbs(P, 1, address=(os.environ['HOST'], 50000), job_name=job_name, cpu_time=cpu_time, server=True, job_header=job_header, log_level=log_level)
        if print_info:
            print "1- Run 'pipeweb track <shortname> %s' \n to add the pipe to the web interface.\n"%(P.sqlfile)
            print "2- Set acl with 'pipeutils -a <username> -l 2 %s'\n"%P.sqlfile
            print "3- Launch the web server with 'pipeweb start'"
            print "You should be able to browse the result on the web page http://localhost:8080\n"

if __name__ == "__main__":
    main()
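### Usage sketch (assuming this launcher is saved as main.py; the file name is
### not fixed by the example itself). The options defined above correspond to
### invocations such as:
###   'python main.py -d'    run the pipeline interactively, for debugging;
###   'python main.py -p 4'  run it with four local parallel processes;
###   'python main.py'       start the PBS server with a single worker;
###   'python main.py -a 2'  submit two supplementary PBS workers to a running server.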
""" seg_cmb_code.py
Generate a cmb map from lambda-CDM cl.
"""
import healpy as hp
import pylab as pl
### Define some global parameters
lst_par = ['lmax', 'nside', 'cmb_unit', 'seed']
lst_tag = lst_par
lmax = 500
nside = 256
cmb_unit = "mK_CMB"
seed = 0 ## actually not used by synfast
### Generate a cmb map
cmb_cl = pl.loadtxt("lambda_best_fit.txt")[0:lmax+1,0]
cmb_map = hp.synfast(cmb_cl, nside, lmax=lmax)
### Save it to disk
cmb_cl_fn = get_data_fn ('cls_cmb.txt')
cmb_map_fn = get_data_fn ('map_cmb.fits')
hp.write_map(cmb_map_fn, cmb_map)
pl.savetxt (cmb_cl_fn , cmb_cl)
### Make a plot
cmb_map_fig = cmb_map_fn.replace('.fits', '.png')
hp.mollview(cmb_map)
pl.savefig (cmb_map_fig)
### Set output
output = [seed]
""" This is the docstring of the segment script.
This segment script illustrates how to use the various utilities
available from the default environment.
"""
### How to organize the segment script?
###
### A segment script contains some python code which will be applied to
### some data set.
### The pipelet software puts no constraint on this code, but it
### offers a number of utilities which encourage good practice.
###
### From the pipeline point of view, the segment script corresponds to
### some processing applied to the data (input/output) with some tuned parameters.
### The pipelet software offers default utilities for these three
### entities:
### - processing,
### - data,
### - parameters.
### The data and file utilities:
###
### Data are usually stored in files.
### The pipelet software offers a specific location for all data files,
### which can be browsed from the web interface.
### To get one of those files:
output_fn = get_data_fn ('mydata.dat')
### To retrieve some data file from an upstream segment named 'first':
input_fn = glob_seg ('*.dat', 'first')[0]
### One may also need some temporary files:
temporary_fn = get_tmp_fn()
### To read and write data to files, one may use some default routines
### based on the Python pickle module:
load_products (input_fn , globals(), ["D"])
save_products (output_fn, globals(), ["D"])
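### For reference, a minimal sketch of what such pickle based helpers could
### look like is given below. This is an illustration only: the function names
### 'save_products_sketch'/'load_products_sketch' are hypothetical, and it
### simply assumes the named variables of the given namespace are stored in a
### single pickled dictionary; the real pipelet routines may differ.
import pickle

def save_products_sketch(filename, namespace, names):
    ## pickle only the requested variables from the given namespace
    with open(filename, "wb") as f:
        pickle.dump(dict((k, namespace[k]) for k in names), f)

def load_products_sketch(filename, namespace, names):
    ## restore the requested variables into the given namespace
    with open(filename, "rb") as f:
        data = pickle.load(f)
    for k in names:
        namespace[k] = data[k]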
### The parameter utilities:
###
### The pipelet software automatically saves all parameters listed in
### the variable named 'lst_par'.
### Some of them can be made visible from the web interface using the
### variable 'lst_tag'.
lst_par = ['my_dim', 'my_option', 'my_nb_of_iter']
lst_tag = ['my_dim', 'my_nb_of_iter']
### To retrieve some parameters of an upstream segment:
load_param ('first', globals(), ['my_previous_option'])
### The processing utilities:
###
### A standard logging.Logger object is available from the segment
### context.
log.info("Until now, nothing's wrong\n")
### It may happen that the processing applied to the data contains some
### generic code you want to reuse from one segment to another.
### This portion of code can be factored out into a hook script named
### 'seg_segname_hookname.py', which can be called with:
hook ('preproc', globals())
### It is also possible to call an arbitrary subprocess without
### losing the log facility:
logged_subprocess(['my_sub_process', 'my_dim', 'my_nb_of_iter'])
### The pipe scheme utilities:
###
### The pipe scheme can also be controlled from the segment environment.
### The default behaviour is set by the segment input and
### output variables:
output = [1,2,3] ## will generate 3 instances of the downstream
## segment. Each of them will receive one element of
## the list as input.
output = input ## will generate 1 instance of the downstream
## segment, per current instance.
output = None ## will generate 1 instance of the downstream
## segment for all current instances.
### This default behaviour can be altered by specifying an @multiplex
### directive (see documentation).
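### As an illustration of this default behaviour (a sketch only, using a
### hypothetical parent segment and its child): a parent segment ending with
###     output = [1, 2, 3]
### spawns three instances of the child segment; in each child instance the
### pushed element (here 1, 2 or 3) is then available as the segment input.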
@@ -9,16 +9,12 @@ S = """
mkgauss->convol;
fftimg->convol;
"""
#T.connect('second', ['third', 'fourth'], 'fourth')
#T.compute_hash()
T = pipeline.Pipeline(S, code_dir=op.abspath('./'), prefix=op.abspath('./'))
T.to_dot('pipeline.dot')
T.push(fftimg=[1,2,3,4])
#T.push (first=["lancelot"])
print T
W,t = launch_interactive(T)
W.run()
"""
""" Perform image convolution in Fourier domain.
Fourier inputs are retrived from fftimg and mkgauss segments.
"""
import pylab
## retrieve image dimensions
load_param("fftimg", globals(), ["x_size", "y_size"])
## get image number
img = seg_input['fftimg']
## retrieve Fourier inputs
fft_img = glob_seg("fftimg", "fft_%d.dat"%img)[0]
fft_filter = glob_seg("mkgauss", "fft.dat")[0]
load_products(fft_filter, globals(), ["filter"])
load_products(fft_img, globals(), ["im_fft"])
## Perform convolution
for i in range(x_size):
    for j in range(y_size):
        im_fft[i,j] = im_fft[i,j] * filter[i,j]
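## (note: this element-by-element loop computes the same result as the
## array product "im_fft = im_fft * filter")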
## Inverse Fourier transform
im = pylab.ifft2(im_fft).real
## Make a plot of the filtered maps
pylab.imshow(im)
pylab.gray()
pylab.savefig(get_data_fn("filtered_%s.png"%img))
## output image number
seg_output = [img]
""" Compute 2d fft.
Input images are pushed from the main script and read from the
seg_input variable.
"""
import glob
import pylab
lst_par = ["x_size", "y_size"]
lst_tag = ["x_size", "y_size"]
## set image number
if seg_input is not None:
    img = seg_input.values()[0]
else:
    img = 1
## get image file name
img_file = glob.glob("%d.dat"%img)[0]
## load image and make a plot
im = pylab.loadtxt(img_file)
pylab.imshow(im)
pylab.gray()
pylab.savefig(get_data_fn("%d.png"%img))
## get image dimensions
x_size = pylab.shape(im)[0]
y_size = pylab.shape(im)[1]
## compute fft
im_fft = pylab.fft2(im)
save_products(get_data_fn("fft_%d.dat"%img), globals(), ["im_fft"])
## output image number
seg_output = [img]
""" Compute 2d fft.
""" Import input images
"""
@@ -15,7 +15,7 @@ else:
img = 1
## get image file name
img_file = glob.glob("%d.dat"%img)[0]
img_file = glob.glob("%d.dat"%img)
## load image and make a plot
im = pylab.loadtxt(img_file)
@@ -25,12 +25,7 @@ pylab.savefig(get_data_fn("%d.png"%img))
x_size = pylab.shape(im)[0]
y_size = pylab.shape(im)[1]
## compute fft
im_fft = pylab.fft2(im)
save_products(get_data_fn("fft_%d.dat"%img), globals(), ["im_fft"])
seg_output = [img]
save_products(get_data_fn("%d.dat"%img), globals(), ["im"])
# from PIL import Image
@@ -47,3 +42,5 @@ seg_output = [img]
# #imshow(new)
# savetxt("%d.dat"%(l+1), new)
seg_output = [img]
@@ -9,7 +9,6 @@ lst_tag = ["fwhm"]
import pylab
## Image dimension
x_size = 256
y_size = 256
@@ -27,19 +26,22 @@ for i in range(x_size):
    for j in range(y_size):
        dist = ((i-center_x)**2) + ((j-center_y)**2)
        im[i,j] = a*pylab.exp(dist*fact)
for i in range(center_x, center_x+x_size):
    for j in range(center_y, center_y+y_size/2):
        c = im[i%x_size,j%x_size]
        im[i%x_size,j%x_size] = im[i-center_x, j-center_y]
        im[i-center_x, j-center_y] = c
# for i in range(center_x, center_x+x_size):
# for j in range(center_y, center_y+y_size/2):
# c = im[i%x_size,j%x_size]
# im[i%x_size,j%x_size] = im[i-center_x, j-center_y]
# im[i-center_x, j-center_y] = c
## plot it
pylab.imshow(im)
pylab.gray()
pylab.savefig(get_data_fn("gaussian_pattern.png"))
## compute its fft
filter = pylab.fft2(im)
filter = pylab.fftshift(filter)
## save it to disk
save_products(get_data_fn("fft.dat"), globals(), ["filter"])