Commit ccf31df3 authored by Betoule Marc's avatar Betoule Marc
Browse files

Setting the level of log messages to what seems reasonable

parent 57963c89
......@@ -222,14 +222,14 @@ class Scheduler():
dprod = [t.task_input for t in d] ## done products
failed = self.tracker.get_failed(seg) # failed tasks
failed_prod = [t.task_input for t in failed] # failed products
logger.debug('Found %d done tasks segment %s'%(len(d),seg))
logger.debug('Found %d failed tasks segment %s'%(len(failed),seg))
logger.info('Found %d done tasks segment %s'%(len(d),seg))
logger.info('Found %d failed tasks segment %s'%(len(failed),seg))
## task list to queue
l = self.products_list.multiplex(seg, parents, self.pipe.repository.get_directive(Multiplex,seg))
## task with no input
if not l:
l = [Task(seg)]
logger.debug('Found %d tasks in seg %s to get done'%(len(l),seg))
logger.info('Found %d tasks in seg %s to get done'%(len(l),seg))
for t in l: # foreach task of the task list
if (t.task_input in failed_prod): # done but failed
logger.debug("task already done and failed in seg %s"%seg)
......@@ -298,7 +298,7 @@ class Scheduler():
while(self.task_queue.get(block=False)):
self.task_queue.task_done()
except Queue.Empty:
logger.info("Empty Queue")
logger.debug("Empty Queue")
def run(self):
""" Start the scheduler.
......@@ -313,7 +313,7 @@ class Scheduler():
for k,v in self.pipe._input.iteritems():
t = Task(self.pipe.get_parents(k)[0], task_output=v)
self.products_list.push(t)
logger.info("Pushing phantom task %s"%str(t))
logger.debug("Pushing phantom task %s"%str(t))
try:
for s in self.pipe.flatten():
self.push_next_seg(s)
......
......@@ -212,15 +212,15 @@ class SqliteTracker(Tracker,threading.Thread):
'select seg_id from segments where curr_dir = ? limit 1',
(curr_dir,)).fetchone()[0]
self.seg_id_cache[s] = seg_id
logger.info("Segment %s instance (%s) already registered in db."%(s, curr_dir))
logger.debug("Segment %s instance (%s) already registered in db."%(s, curr_dir))
except TypeError:
logger.info("Creating segment %s instance (%s)."%(s, curr_dir))
logger.debug("Creating segment %s instance (%s)."%(s, curr_dir))
c = self.conn.execute(
'insert into segments (seg, curr_dir, comment) values (?, ?, ?)'
,(s, curr_dir, docline))
seg_id = c.lastrowid
self.seg_id_cache[s] = seg_id
logger.info("Storing connectivity for segment %s."%s)
logger.debug("Storing connectivity for segment %s."%s)
for p in self.pipe._parents[s]:
self.conn.execute(
......
......@@ -98,7 +98,7 @@ class Worker(object):
"""
logger.info( 'checking in ...')
if not self.scheduler.check_in():
logger.warning( "check_in failed : stopping")
logger.error( "check_in failed : stopping")
return
logger.info( 'checked in.')
......@@ -117,7 +117,7 @@ class Worker(object):
self.task = None
logger.info("%d jobs completed" % n)
except AbortError, e:
logger.warning( "Abort after catching signal %d" % e.signal)
logger.error( "Abort after catching signal %d" % e.signal)
self.scheduler.requeue(task)
finally:
self.terminate()
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment