Commit 39f9f323 authored by Miquel Torres, committed by GitHub

Merge pull request #209 from catalin-manciu/image_gen_api

Add the 'makeimage' service
parents a9edb751 53d33b05
from io import BytesIO
from matplotlib.figure import Figure
from matplotlib.ticker import FormatStrFormatter
from matplotlib.backends.backend_agg import FigureCanvasAgg
# Default and minimum chart canvas sizes, in pixels.
DEF_CHART_W = 600
DEF_CHART_H = 500
MIN_CHART_W = 400
MIN_CHART_H = 300
def gen_image_from_results(result_data, width, height):
    # Fall back to the default canvas size, then clamp to the minimums.
    canvas_width = width if width is not None else DEF_CHART_W
    canvas_height = height if height is not None else DEF_CHART_H
    canvas_width = max(canvas_width, MIN_CHART_W)
    canvas_height = max(canvas_height, MIN_CHART_H)

    values = [element.value for element in result_data['results']]
    max_value = max(values)
    min_value = min(values)
    value_range = max_value - min_value
    # Pad the y-axis by 5% of the value range so points do not sit
    # directly on the plot border.
    range_increment = 0.05 * abs(value_range)

    fig = Figure(figsize=(canvas_width / 100, canvas_height / 100), dpi=100)
    ax = fig.add_axes([.1, .15, .85, .75])
    ax.set_ylim(min_value - range_increment, max_value + range_increment)

    xax = range(0, len(values))
    yax = values
    ax.set_xticks(xax)
    ax.set_xticklabels([element.date.strftime('%d %b') for element in
                        result_data['results']], rotation=75)
    ax.set_title(result_data['benchmark'].name)
    if result_data['relative']:
        ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f%%'))

    # Scale the font sizes with the canvas dimensions: index 0 (width)
    # drives the y tick labels, index 1 (height) the x tick labels.
    font_sizes = [16, 16]
    dimensions = [canvas_width, canvas_height]
    for idx, value in enumerate(dimensions):
        if value < 500:
            font_sizes[idx] = 8
        elif value < 1000:
            font_sizes[idx] = 12
    if result_data['relative']:
        # Relative tick labels carry a '%' suffix, so shrink them a bit.
        font_sizes[0] -= 2
    for item in ax.get_yticklabels():
        item.set_fontsize(font_sizes[0])
    for item in ax.get_xticklabels():
        item.set_fontsize(font_sizes[1])
    ax.title.set_fontsize(font_sizes[1] + 4)

    ax.scatter(xax, yax)
    ax.plot(xax, yax)

    # Render with the Agg backend and return the raw PNG bytes.
    canvas = FigureCanvasAgg(fig)
    buf = BytesIO()
    canvas.print_png(buf)
    buf_data = buf.getvalue()
    return buf_data
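
A minimal sketch of driving gen_image_from_results outside Django, assuming only what the function actually reads: result objects exposing .value and .date, a benchmark exposing .name, and a 'relative' flag (the real callers pass Django model instances; the stub data below is made up):

from datetime import datetime, timedelta
from types import SimpleNamespace

results = [
    SimpleNamespace(value=100.0 + i,
                    date=datetime(2015, 6, 1) + timedelta(days=i))
    for i in range(10)
]
result_data = {
    'results': results,
    'benchmark': SimpleNamespace(name='float'),
    'relative': False,
}

with open('chart.png', 'wb') as out:
    # None/None falls back to the 600x500 default canvas.
    out.write(gen_image_from_results(result_data, None, None))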
@@ -23,6 +23,7 @@ urlpatterns += patterns('codespeed.views',
    url(r'^timeline/json/$', 'gettimelinedata', name='gettimelinedata'),
    url(r'^comparison/$', 'comparison', name='comparison'),
    url(r'^comparison/json/$', 'getcomparisondata', name='getcomparisondata'),
    url(r'^makeimage/$', 'makeimage', name='makeimage'),
)
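
Since the route is named, it can be reversed like any other URL pattern; a quick sketch (the resulting path assumes the codespeed URLs are mounted at the site root):

from django.core.urlresolvers import reverse

reverse('makeimage')  # -> '/makeimage/' when mounted at the root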
urlpatterns += patterns('codespeed.views',
from django.core.exceptions import ValidationError
def validate_results_request(data):
    """
    Validate that a results request dictionary has all the needed
    parameters and that their types are correct.

    Raises ValidationError on error.
    """
    mandatory_data = [
        'env',
        'proj',
        'branch',
        'exe',
        'ben',
    ]
    for key in mandatory_data:
        if key not in data:
            raise ValidationError('Key "' + key +
                                  '" missing from GET request!')
        elif data[key] == '':
            raise ValidationError('Value for key "' + key +
                                  '" empty in GET request!')

    integer_data = [
        'revs',
        'width',
        'height'
    ]
    # Check that the items in integer_data are strictly positive
    # integers, if they are present at all.
    for key in integer_data:
        if key in data:
            try:
                int_value = int(data[key])
            except ValueError:
                raise ValidationError('Value for "' + key +
                                      '" is not an integer!')
            if int_value <= 0:
                raise ValidationError('Value for "' + key + '" should be a'
                                      ' strictly positive integer!')
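
A quick sketch of the validator rejecting a bad request; the parameter values are hypothetical:

try:
    validate_results_request({'env': 'prod', 'proj': 'MyProject',
                              'branch': 'default', 'exe': 'myexe',
                              'ben': 'float', 'width': '-5'})
except ValidationError as err:
    # The negative width fails the strictly-positive check above.
    print(err)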
@@ -3,9 +3,12 @@ from __future__ import absolute_import, unicode_literals

import json
import logging

import django

from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.http import HttpResponse, Http404, HttpResponseBadRequest,\
    HttpResponseNotFound
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.csrf import csrf_exempt
@@ -15,9 +18,12 @@ from django.conf import settings

from .models import (Environment, Report, Project, Revision, Result,
                     Executable, Benchmark, Branch)
from .views_data import (get_default_environment, getbaselineexecutables,
                         getdefaultexecutable, getcomparisonexes,
                         get_benchmark_results)
from .results import save_result, create_report_if_enough_data
from . import commits
from .validators import validate_results_request
from .images import gen_image_from_results

logger = logging.getLogger(__name__)
@@ -731,3 +737,38 @@ def add_json_results(request):
    logger.debug("add_json_results: completed")
    return HttpResponse("All result data saved successfully", status=202)
def django_has_content_type():
    # HttpResponse takes a 'content_type' keyword starting with
    # Django 1.6; older versions use 'mimetype' instead.
    return (django.VERSION[0] > 1 or
            (django.VERSION[0] == 1 and django.VERSION[1] >= 6))


@require_GET
def makeimage(request):
    data = request.GET

    try:
        validate_results_request(data)
    except ValidationError as err:
        return HttpResponseBadRequest(str(err))

    try:
        result_data = get_benchmark_results(data)
    except ObjectDoesNotExist as err:
        return HttpResponseNotFound(str(err))

    image_data = gen_image_from_results(
        result_data,
        int(data['width']) if 'width' in data else None,
        int(data['height']) if 'height' in data else None)

    if django_has_content_type():
        response = HttpResponse(content=image_data, content_type='image/png')
    else:
        response = HttpResponse(content=image_data, mimetype='image/png')

    response['Content-Length'] = len(image_data)
    response['Content-Disposition'] = 'attachment; filename=image.png'

    return response
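
A sketch of fetching a chart from the new endpoint with the Python 3 standard library; the host, port, and parameter values are hypothetical:

from urllib.parse import urlencode
from urllib.request import urlopen

params = urlencode({
    'env': 'prod', 'proj': 'MyProject', 'branch': 'default',
    'exe': 'myexe', 'ben': 'float',
    'revs': 20, 'width': 800, 'height': 600,  # optional; must be positive
})
response = urlopen('http://localhost:8000/makeimage/?' + params)
png_data = response.read()  # raw PNG bytes served as image/png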
@@ -2,8 +2,9 @@

from __future__ import absolute_import

from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist

from codespeed.models import Executable, Revision, Project, Branch,\
    Environment, Benchmark, Result
def get_default_environment(enviros, data, multi=False):
@@ -147,3 +148,83 @@ def getcomparisonexes():
        all_executables[proj] = executables
        exekeys += executablekeys
    return all_executables, exekeys
def get_benchmark_results(data):
    environment = Environment.objects.get(name=data['env'])
    project = Project.objects.get(name=data['proj'])
    executable = Executable.objects.get(name=data['exe'], project=project)
    branch = Branch.objects.get(name=data['branch'], project=project)
    benchmark = Benchmark.objects.get(name=data['ben'])

    number_of_revs = int(data.get('revs', 10))
    baseline_commit_name = data.get('base_commit')
    # Results are reported relative to a reference value when the request
    # either asks for it explicitly or names a baseline commit.
    relative_results = (
        ('relative' in data and data['relative'] in ['1', 'yes']) or
        baseline_commit_name is not None)

    result_query = Result.objects.filter(
        benchmark=benchmark
    ).filter(
        environment=environment
    ).filter(
        executable=executable
    ).filter(
        revision__project=project
    ).filter(
        revision__branch=branch
    ).select_related(
        "revision"
    ).order_by('-date')[:number_of_revs]

    if len(result_query) == 0:
        raise ObjectDoesNotExist("No results were found!")

    # Reverse into chronological order (the query sorts newest first).
    result_list = list(result_query)
    result_list.reverse()

    if relative_results:
        # By default the oldest plotted result is the reference value.
        ref_value = result_list[0].value

    if baseline_commit_name is not None:
        # Baseline parameters fall back to the main request's values
        # unless explicitly overridden.
        baseline_env = environment
        baseline_proj = project
        baseline_exe = executable
        baseline_branch = branch

        if 'base_env' in data:
            baseline_env = Environment.objects.get(name=data['base_env'])
        if 'base_proj' in data:
            baseline_proj = Project.objects.get(name=data['base_proj'])
        if 'base_exe' in data:
            baseline_exe = Executable.objects.get(name=data['base_exe'],
                                                  project=baseline_proj)
        if 'base_branch' in data:
            baseline_branch = Branch.objects.get(name=data['base_branch'],
                                                 project=baseline_proj)

        base_data = Result.objects.get(
            benchmark=benchmark,
            environment=baseline_env,
            executable=baseline_exe,
            revision__project=baseline_proj,
            revision__branch=baseline_branch,
            revision__commitid=baseline_commit_name)
        ref_value = base_data.value

    if relative_results:
        # Convert each value to a percentage delta from the reference.
        for element in result_list:
            element.value = (100 * (element.value - ref_value)) / ref_value

    return {
        'environment': environment,
        'project': project,
        'executable': executable,
        'branch': branch,
        'benchmark': benchmark,
        'results': result_list,
        'relative': relative_results,
    }
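
A sketch of calling get_benchmark_results directly with the same keys a GET request would carry; all values are hypothetical and assume matching objects exist in the database:

result_data = get_benchmark_results({
    'env': 'prod', 'proj': 'MyProject', 'branch': 'default',
    'exe': 'myexe', 'ben': 'float',
    'revs': '20',       # optional, defaults to 10
    'relative': 'yes',  # values become percentage deltas from the oldest result
})
assert result_data['relative'] is True
assert len(result_data['results']) <= 20  # oldest first after the reverse()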
@@ -9,7 +9,7 @@ setup(
    download_url="https://github.com/tobami/codespeed/tags",
    license='GNU Lesser General Public License version 2.1',
    keywords=["benchmarking", "visualization"],
    install_requires=['django>=1.6,<1.9', 'isodate>=0.4.7,<0.6', 'matplotlib>=1.4.3'],
    packages=find_packages(exclude=['ez_setup', 'sample_project']),
    description='A web application to monitor and analyze the performance of your code',
    include_package_data=True,