
Commit 4d796360 authored by LE GAC Renaud

Add the first source get_event_minimal.

parent a0243fc9
@@ -62,6 +62,12 @@ is_db = (ctrl == "plugin_dbui" and fnct in ("call", "csv", "dbui_conf")) or \
     (ctrl == "plugin_event")

 if is_db:
+
+    # register source for the reporting
+    event.Event.register_source("minimal event",
+                                event.get_event_minimal,
+                                dict(event=None))
+
     event.Core.define_tables(db, T)
     event.Report.define_tables(db, T)
     event.Selector.define_tables(virtdb, db, T)
@@ -17,6 +17,8 @@ from model_core import Core
 from model_report import Report
 from model_selector import Selector
+from report_dataframes import (get_event_minimal)
+
 from report_objects import (do_title,
                             get_value,
                             split_dbfield,
@@ -36,18 +36,18 @@ class Event(object):
     @staticmethod
-    def register_source(func, kwargs):
+    def register_source(name, func, kwargs):
         """Register sources which are used in the reporting section.

         Args:
+            name (str): name of the source to be used in the UI.
             func (reference): the function generating the DataFrame.
-            kwargs (dict): input keyword arguments
+            kwargs (dict): input keyword arguments for the function.

         """
         event = PluginManager("event").event

-        if event.source is None:
-            event.source = Storage()
+        if event.sources is None:
+            event.sources = Storage()

-        event.source[name] = Storage(func=func, kwargs=kwargs)
+        event.sources[name] = Storage(func=func, kwargs=kwargs)
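Downstream, the reporting code can resolve a source by name and run it. A minimal consumption sketch, assuming the caller simply unpacks the stored keyword arguments (run_source is a hypothetical helper, not part of this commit):

    from gluon.tools import PluginManager

    def run_source(name):
        # the Storage(func=..., kwargs=...) registered by Event.register_source
        entry = PluginManager("event").event.sources[name]
        return entry.func(**entry.kwargs)

    df = run_source("minimal event")   # calls get_event_minimal(event=None)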
@@ -6,9 +6,9 @@ import plugin_dbui as dbui
 from callbacks import INHIBIT_CASCADE_DELETE
-from gluon import current, IS_IN_DB
+from gluon import current, IS_IN_DB, IS_IN_SET
 from gluon.tools import PluginManager
 from pydal import Field
 from pygments.lexer import default

 DEF_COLUMNS_LISTS = \
@@ -190,7 +190,7 @@ class Report(object):
             Field("title", "string", length=255),
             Field("definition", "text"),
             Field("source", "string", length=255, notnull=True),
-            Field("kwargs", "json", default=dict()),
+            Field("kwargs", "text"),
             Field("transform", "text"),
             Field("group_field",
@@ -213,6 +213,11 @@ class Report(object):
             migrate="lists2.table")

+        sources = PluginManager("event").event.sources.keys()
+        sources.sort()
+
+        db.lists2.source.requires = IS_IN_SET(sources)
+
         return table

     @staticmethod
# -*- coding: utf-8 -*-
""" report_dataframes module

"""
import json
import pandas as pd

from datetime import date
from gluon import current
# get_id is assumed to come from the plugin_dbui helpers
from plugin_dbui import get_id

YEAR_MIN = date(1900, 1, 1)
YEAR_MAX = date(9999, 12, 31)
def db2df(db, query, fields=[], columns=None):
    """Transform a database query into a dataframe.

    Example::

        # database query
        query = history.id_events == id_event
        query &= history.id_projects == id_project
        query &= history.id_fundings == db.fundings.id
        query &= history.id_people == db.people.id
        query &= history.id_people_categories == db.people_categories.id
        query &= history.id_teams == db.teams.id

        # fields to be extracted from the database
        fields = [
            history.id_domains,
            history.id_events,
            history.id_people,
            history.id_projects,
            history.id_teams,
            db.teams.team,
            db.people_categories.category,
            db.people_categories.code,
            db.people.first_name,
            db.people.last_name,
            history.percentage,
            history.start_date,
            history.end_date,
            history.data,
            db.fundings.agency]

        # get the DataFrame
        df = db2df(db, query, fields)
        print df.info()

    Args:
        db (pyDAL.DAL): database connection.
        query: database query including statements to join foreign tables.
        fields (list): list of pyDAL.Field to be extracted from the query.
            All fields are extracted when not defined.
        columns (list): names of the columns in the dataframe. There is a
            one-to-one correspondence between the fields and columns lists.
            Names of the database fields are used when columns is not
            defined.

    Returns:
        pandas.DataFrame

    """
    if columns is None and len(fields) > 0:
        columns = [field.name for field in fields]

    rows = db.executesql(db(query)._select(*fields))
    return pd.DataFrame.from_records(list(rows), columns=columns)
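Outside web2py, db2df can be exercised against an in-memory pyDAL database. A small sketch, where the teams table and its rows are made up for the illustration:

    from pydal import DAL, Field

    db = DAL("sqlite:memory")
    db.define_table("teams", Field("team", "string"))
    db.teams.insert(team="Atlas")
    db.teams.insert(team="Planck")

    # two rows with the columns "id" and "team"
    df = db2df(db, db.teams.id > 0, [db.teams.id, db.teams.team])
    print df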
def get_event_minimal(event, **kwargs):
    """DataFrame with a minimal set of information for an event.
    The selection of the event is performed via the keyword arguments.

    The index of the DataFrame is the database id.
    The columns of the DataFrame are:

        * one column for each key of the history.data dictionary
        * end_date (date)
        * id_domains (int)
        * id_events (int)
        * id_objects (int)
        * id_objects_categories (int)
        * id_people (int)
        * id_people_categories (int)
        * id_projects (int)
        * id_teams (int)
        * start_date (date)

    Args:
        event (str): the event used to generate the DataFrame.

    Keyword Args:
        id_events (int):
        id_domains (int):
        id_fundings (int):
        id_objects (int):
        id_object_categories (int):
        id_people (int):
        id_people_categories (int):
        id_projects (int):
        id_teams (int):
        year_end (date):
        year_start (date):

    Returns:
        pandas.DataFrame

    """
    db = current.globalenv['db']
    history = db.history

    # database query
    id_event = get_id(db.events, event=event)
    query = query_history(db, id_events=id_event, **kwargs)

    # fields to be extracted from the database
    fields = [
        history.id,
        history.id_domains,
        history.id_events,
        history.id_fundings,
        history.id_object,
        history.id_object_categories,
        history.id_people,
        history.id_people_categories,
        history.id_projects,
        history.id_teams,
        history.start_date,
        history.end_date,
        history.data]

    # columns name for the DataFrame
    columns = [field.name for field in fields]

    # the DataFrame
    df = (db2df(db, query, fields, columns)
          .set_index("id")
          .pipe(normalize_history_data)
          .assign(
              start_date=lambda x: x.start_date.fillna(YEAR_MIN),
              end_date=lambda x: x.end_date.fillna(YEAR_MAX)))

    return df
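A hypothetical call, mirroring the registration in the model; the event name and the team identifier are made up:

    df = get_event_minimal("conference", id_teams=2)
    print df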
def normalize_history_data(df):
    """Normalise the JSON field ``history.data``.

    The database field ``history.data`` contains a JSON string serialising
    a dictionary. Each key is transformed into a DataFrame column, which is
    added to the initial DataFrame. The column ``data`` is dropped.

    Args:
        df (pandas.DataFrame): the DataFrame has to contain the column
            ``data``.

    Returns:
        pandas.DataFrame

    """
    # 1) convert the json string into a json object for each row
    # 2) convert the data series into a json string preserving the index
    #    (orient split)
    # Fix the issue with "name":"data" in pandas version 0.18
    sdata = (df.data.apply(lambda x: json.loads(x))
             .to_json(orient="split")
             .replace('"name":"data",', ""))

    # 3) convert the json string into a dataframe, with a column for each key
    # 4) fix possible wrong boolean values
    # 5) merge the dataframes using the index
    # 6) drop the obsolete column data
    df = (pd.read_json(sdata, orient="split")
          .replace("false", False)
          .replace("true", True)
          .merge(df, left_index=True, right_index=True, how="right")
          .drop("data", axis="columns"))

    return df
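The normalisation can be checked with pandas alone; the keys in the fake data column below are invented for the demonstration:

    import pandas as pd

    fake = pd.DataFrame(
        {"id_people": [10, 11],
         "data": ['{"seniority": 2, "permanent": "true"}',
                  '{"seniority": 5, "permanent": "false"}']})

    # the columns "seniority" and "permanent" replace the column "data",
    # and the string booleans are converted into real booleans
    print normalize_history_data(fake)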
def query_history(db, **kwargs):
    """Build the query for the history table.
    The operator AND is applied between the conditions.
    Conditions are defined by the keyword arguments.

    Args:
        db (gluon.dal.DAL): database connection.

    Keyword Args:
        id_events (int):
        id_domains (int):
        id_fundings (int):
        id_objects (int):
        id_object_categories (int):
        id_people (int):
        id_people_categories (int):
        id_projects (int):
        id_teams (int):
        year_end (date):
        year_start (date):

    Returns:
        gluon.DAL.Query

    """
    history = db.history
    query = None

    for k, v in kwargs.iteritems():
        if v is None:
            continue

        if k == "year_end":
            qi = history.start_date <= v

        elif k == "year_start":
            qi = (history.end_date == None) | (history.end_date >= v)

        else:
            qi = history[k] == v

        query = qi if query is None else (query) & (qi)

    return query
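A hypothetical use of query_history, restricting the history records of one event to a given team and time window; the identifiers are illustrative:

    from datetime import date

    query = query_history(db,
                          id_events=4,
                          id_teams=2,
                          year_start=date(2010, 1, 1),
                          year_end=date(2015, 12, 31))
    rows = db(query).select(db.history.ALL)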
@@ -217,6 +217,11 @@ class ReportUi(object):
             language="json",
             xtype="xaceeditorfield")

+        mdf.configure_field("kwargs",
+                            editorHeight=220,
+                            language="json",
+                            xtype="xaceeditorfield")
+
         mdf.configure_field("sorters",
                             height=210,
                             hideHeader=True,
@@ -264,7 +264,7 @@ class ViewportUi(object):
         add_child = node.add_child

-        for row in db(db.lists2.id > 0).select(orderby=db.lists.name):
+        for row in db(db.lists2.id > 0).select(orderby=db.lists2.name):
             panel = selector_panel
             panel.baseUrl = URL("plugin_event", "lists2")