Fixed confusing variable names in JSON output functions and methods.

These functions and methods were originally derived from the XML output,
so the XML variable names were carried over unchanged. All "<name>_xml"
variables are now named "<name>_json" instead.
Adam Waldenberg 2015-11-24 21:20:35 +01:00
parent bfde70db91
commit 9bd4b979b3
7 changed files with 90 additions and 91 deletions
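
The output_json() methods touched below all build their JSON by hand: each field is a tab-indented string fragment, the fragments are concatenated per entry, and a trailing separator is stripped in a for/else clause before the result is spliced into the enclosing document. A minimal, self-contained sketch of that pattern; the author name, email and row count are hypothetical, not values taken from the commit:

# Illustrative sketch only -- hypothetical values, not code from this commit.
import json

name_json = "\t\t\t\t\"name\": \"" + "Jane Doe" + "\",\n"
email_json = "\t\t\t\t\"email\": \"" + "jane@example.com" + "\",\n"
rows_json = "\t\t\t\t\"rows\": " + str(42) + "\n"

blame_json = "{\n" + name_json + email_json + rows_json + "\t\t\t},"
blame_json = blame_json[:-1]    # strip the trailing comma, as the for/else clauses below do

document = "{\n\t\t\"blame\": {\n\t\t\t\"authors\": [\n\t\t\t" + blame_json + "]\n\t\t}\n}"
print(json.loads(document))     # {'blame': {'authors': [{'name': 'Jane Doe', ...}]}}

The for/else trimming works because those loops never break: the else clause runs after normal completion and removes the separator appended by the last iteration.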


@@ -95,27 +95,27 @@ class BlameOutput(Outputable):
 		print(blame_xml)
 	def output_json(self):
-		message_xml = "\t\t\t\"message\": \"" + _(BLAME_INFO_TEXT) + "\",\n"
-		blame_xml = ""
+		message_json = "\t\t\t\"message\": \"" + _(BLAME_INFO_TEXT) + "\",\n"
+		blame_json = ""
 		for i in sorted(self.blame.get_summed_blames().items()):
 			author_email = self.changes.get_latest_email_by_author(i[0])
-			name_xml = "\t\t\t\t\"name\": \"" + i[0] + "\",\n"
+			name_json = "\t\t\t\t\"name\": \"" + i[0] + "\",\n"
 			email_json = "\t\t\t\t\"email\": \"" + author_email + "\",\n"
-			gravatar_xml = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
-			rows_xml = "\t\t\t\t\"rows\": " + str(i[1].rows) + ",\n"
-			stability_xml = ("\t\t\t\t\"stability\": " + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows,
-				self.changes)) + ",\n")
-			age_xml = ("\t\t\t\t\"age\": " + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + ",\n")
-			percentage_in_comments_xml = ("\t\t\t\t\"percentage_in_comments\": " + "{0:.2f}".format(100.0 * i[1].comments / i[1].rows) +
-				"\n")
-			blame_xml += ("{\n" + name_xml + email_json + gravatar_xml + rows_xml + stability_xml + age_xml +
-				percentage_in_comments_xml + "\t\t\t},")
+			gravatar_json = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
+			rows_json = "\t\t\t\t\"rows\": " + str(i[1].rows) + ",\n"
+			stability_json = ("\t\t\t\t\"stability\": " + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows,
+				self.changes)) + ",\n")
+			age_json = ("\t\t\t\t\"age\": " + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + ",\n")
+			percentage_in_comments_json = ("\t\t\t\t\"percentage_in_comments\": " +
+				"{0:.2f}".format(100.0 * i[1].comments / i[1].rows) + "\n")
+			blame_json += ("{\n" + name_json + email_json + gravatar_json + rows_json + stability_json + age_json +
+				percentage_in_comments_json + "\t\t\t},")
 		else:
-			blame_xml = blame_xml[:-1]
+			blame_json = blame_json[:-1]
-		print(",\n\t\t\"blame\": {\n" + message_xml + "\t\t\t\"authors\": [\n\t\t\t" + blame_xml + "]\n\t\t}", end="")
+		print(",\n\t\t\"blame\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + blame_json + "]\n\t\t}", end="")
 	def output_text(self):
 		if sys.stdout.isatty() and format.is_interactive_format():


@@ -104,29 +104,29 @@ class ChangesOutput(Outputable):
 			total_changes += authorinfo_list.get(i).deletions
 		if authorinfo_list:
-			message_xml = "\t\t\t\"message\": \"" + _(HISTORICAL_INFO_TEXT) + "\",\n"
-			changes_xml = ""
+			message_json = "\t\t\t\"message\": \"" + _(HISTORICAL_INFO_TEXT) + "\",\n"
+			changes_json = ""
 			for i in sorted(authorinfo_list):
 				author_email = self.changes.get_latest_email_by_author(i)
 				authorinfo = authorinfo_list.get(i)
 				percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
-				name_xml = "\t\t\t\t\"name\": \"" + i + "\",\n"
+				name_json = "\t\t\t\t\"name\": \"" + i + "\",\n"
 				email_json = "\t\t\t\t\"email\": \"" + author_email + "\",\n"
-				gravatar_xml = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
-				commits_xml = "\t\t\t\t\"commits\": " + str(authorinfo.commits) + ",\n"
-				insertions_xml = "\t\t\t\t\"insertions\": " + str(authorinfo.insertions) + ",\n"
-				deletions_xml = "\t\t\t\t\"deletions\": " + str(authorinfo.deletions) + ",\n"
-				percentage_xml = "\t\t\t\t\"percentage_of_changes\": " + "{0:.2f}".format(percentage) + "\n"
+				gravatar_json = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
+				commits_json = "\t\t\t\t\"commits\": " + str(authorinfo.commits) + ",\n"
+				insertions_json = "\t\t\t\t\"insertions\": " + str(authorinfo.insertions) + ",\n"
+				deletions_json = "\t\t\t\t\"deletions\": " + str(authorinfo.deletions) + ",\n"
+				percentage_json = "\t\t\t\t\"percentage_of_changes\": " + "{0:.2f}".format(percentage) + "\n"
-				changes_xml += ("{\n" + name_xml + email_json + gravatar_xml + commits_xml + insertions_xml +
-					deletions_xml + percentage_xml + "\t\t\t}")
-				changes_xml += ","
+				changes_json += ("{\n" + name_json + email_json + gravatar_json + commits_json +
+					insertions_json + deletions_json + percentage_json + "\t\t\t}")
+				changes_json += ","
 			else:
-				changes_xml = changes_xml[:-1]
+				changes_json = changes_json[:-1]
-			print("\t\t\"changes\": {\n" + message_xml + "\t\t\t\"authors\": [\n\t\t\t" + changes_xml + "]\n\t\t}", end="")
+			print("\t\t\"changes\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + changes_json + "]\n\t\t}", end="")
 		else:
 			print("\t\t\"exception\": \"" + _(NO_COMMITED_FILES_TEXT) + "\"")


@@ -53,21 +53,21 @@ class ExtensionsOutput(Outputable):
 	def output_json(self):
 		if extensions.__located_extensions__:
-			message_xml = "\t\t\t\"message\": \"" + _(EXTENSIONS_INFO_TEXT) + "\",\n"
-			used_extensions_xml = ""
-			unused_extensions_xml = ""
+			message_json = "\t\t\t\"message\": \"" + _(EXTENSIONS_INFO_TEXT) + "\",\n"
+			used_extensions_json = ""
+			unused_extensions_json = ""
 			for i in sorted(extensions.__located_extensions__):
 				if ExtensionsOutput.is_marked(i):
-					used_extensions_xml += "\"" + i + "\", "
+					used_extensions_json += "\"" + i + "\", "
 				else:
-					unused_extensions_xml += "\"" + i + "\", "
+					unused_extensions_json += "\"" + i + "\", "
-			used_extensions_xml = used_extensions_xml[:-2]
-			unused_extensions_xml = unused_extensions_xml[:-2]
+			used_extensions_json = used_extensions_json[:-2]
+			unused_extensions_json = unused_extensions_json[:-2]
-			print(",\n\t\t\"extensions\": {\n" + message_xml + "\t\t\t\"used\": [ " + used_extensions_xml + " ],\n" +
-				"\t\t\t\"unused\": [ " + unused_extensions_xml + " ]\n" + "\t\t}", end="")
+			print(",\n\t\t\"extensions\": {\n" + message_json + "\t\t\t\"used\": [ " + used_extensions_json +
+				" ],\n\t\t\t\"unused\": [ " + unused_extensions_json + " ]\n" + "\t\t}", end="")
 	def output_text(self):
 		if extensions.__located_extensions__:


@@ -59,16 +59,16 @@ class FilteringOutput(Outputable):
 	@staticmethod
 	def __output_json_section__(info_string, filtered, container_tagname):
 		if filtered:
-			message_xml = "\t\t\t\t\"message\": \"" + info_string + "\",\n"
-			filtering_xml = ""
+			message_json = "\t\t\t\t\"message\": \"" + info_string + "\",\n"
+			filtering_json = ""
 			for i in filtered:
-				filtering_xml += "\t\t\t\t\t\"" + i + "\",\n"
+				filtering_json += "\t\t\t\t\t\"" + i + "\",\n"
 			else:
-				filtering_xml = filtering_xml[:-3]
+				filtering_json = filtering_json[:-3]
-			return "\n\t\t\t\"{0}\": {{\n".format(container_tagname) + message_xml + \
-				"\t\t\t\t\"entries\": [\n" + filtering_xml + "\"\n\t\t\t\t]\n\t\t\t},"
+			return "\n\t\t\t\"{0}\": {{\n".format(container_tagname) + message_json + \
+				"\t\t\t\t\"entries\": [\n" + filtering_json + "\"\n\t\t\t\t]\n\t\t\t},"
 		return ""
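
As the hunk above shows, __output_json_section__() is a staticmethod that returns either an empty string (nothing was filtered) or a comma-terminated fragment that the caller splices into the enclosing JSON object. A hedged usage sketch; the info string and filtered entries are invented, and the real call sites are not part of this diff:

# Hypothetical call, assuming FilteringOutput has been imported from gitinspector;
# the arguments are made up for illustration.
section = FilteringOutput.__output_json_section__(
	"The following files were excluded from the statistics",  # info_string
	["vendor/jquery.min.js", "build/"],                       # filtered
	"files")                                                  # container_tagname
# "section" begins with "\n" and ends with "\t\t\t}," -- a trailing comma the
# caller is left to handle.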


@@ -98,38 +98,38 @@ class MetricsOutput(Outputable):
 		if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
 			print(",\n\t\t\"metrics\": {\n\t\t\t\"message\": \"" + _(METRICS_MISSING_INFO_TEXT) + "\"\n\t\t}", end="")
 		else:
-			eloc_xml = ""
+			eloc_json = ""
 			if self.metrics.eloc:
 				for i in sorted(set([(j, i) for (i, j) in self.metrics.eloc.items()]), reverse=True):
-					eloc_xml += "{\n\t\t\t\t\"type\": \"estimated-lines-of-code\",\n"
-					eloc_xml += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
-					eloc_xml += "\t\t\t\t\"value\": " + str(i[0]) + "\n"
-					eloc_xml += "\t\t\t},"
+					eloc_json += "{\n\t\t\t\t\"type\": \"estimated-lines-of-code\",\n"
+					eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
+					eloc_json += "\t\t\t\t\"value\": " + str(i[0]) + "\n"
+					eloc_json += "\t\t\t},"
 				else:
 					if not self.metrics.cyclomatic_complexity:
-						eloc_xml = eloc_xml[:-1]
+						eloc_json = eloc_json[:-1]
 			if self.metrics.cyclomatic_complexity:
 				for i in sorted(set([(j, i) for (i, j) in self.metrics.cyclomatic_complexity.items()]), reverse=True):
-					eloc_xml += "{\n\t\t\t\t\"type\": \"cyclomatic-complexity\",\n"
-					eloc_xml += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
-					eloc_xml += "\t\t\t\t\"value\": " + str(i[0]) + "\n"
-					eloc_xml += "\t\t\t},"
+					eloc_json += "{\n\t\t\t\t\"type\": \"cyclomatic-complexity\",\n"
+					eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
+					eloc_json += "\t\t\t\t\"value\": " + str(i[0]) + "\n"
+					eloc_json += "\t\t\t},"
 				else:
 					if not self.metrics.cyclomatic_complexity_density:
-						eloc_xml = eloc_xml[:-1]
+						eloc_json = eloc_json[:-1]
 			if self.metrics.cyclomatic_complexity_density:
 				for i in sorted(set([(j, i) for (i, j) in self.metrics.cyclomatic_complexity_density.items()]), reverse=True):
-					eloc_xml += "{\n\t\t\t\t\"type\": \"cyclomatic-complexity-density\",\n"
-					eloc_xml += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
-					eloc_xml += "\t\t\t\t\"value\": {0:.3f} \"\n".format(i[0])
-					eloc_xml += "\t\t\t},"
+					eloc_json += "{\n\t\t\t\t\"type\": \"cyclomatic-complexity-density\",\n"
+					eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
+					eloc_json += "\t\t\t\t\"value\": {0:.3f} \"\n".format(i[0])
+					eloc_json += "\t\t\t},"
 				else:
-					eloc_xml = eloc_xml[:-1]
+					eloc_json = eloc_json[:-1]
-			print(",\n\t\t\"metrics\": {\n\t\t\t\"violations\": [\n\t\t\t" + eloc_xml + "]\n\t\t}", end="")
+			print(",\n\t\t\"metrics\": {\n\t\t\t\"violations\": [\n\t\t\t" + eloc_json + "]\n\t\t}", end="")
 	def output_xml(self):
 		if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
 			print("\t<metrics>\n\t\t<message>" + _(METRICS_MISSING_INFO_TEXT) + "</message>\n\t</metrics>")


@@ -83,8 +83,8 @@ class ResponsibilitiesOutput(Outputable):
 		print(resp_xml)
 	def output_json(self):
-		message_xml = "\t\t\t\"message\": \"" + _(RESPONSIBILITIES_INFO_TEXT) + "\",\n"
-		resp_xml = ""
+		message_json = "\t\t\t\"message\": \"" + _(RESPONSIBILITIES_INFO_TEXT) + "\",\n"
+		resp_json = ""
 		for i in sorted(set(i[0] for i in self.blame.blames)):
 			responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
@@ -92,27 +92,26 @@ class ResponsibilitiesOutput(Outputable):
 			if responsibilities:
 				author_email = self.changes.get_latest_email_by_author(i)
-				resp_xml += "{\n"
-				resp_xml += "\t\t\t\t\"name\": \"" + i + "\",\n"
-				resp_xml += "\t\t\t\t\"email\": \"" + author_email + "\",\n"
-				resp_xml += "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
-				resp_xml += "\t\t\t\t\"files\": [\n\t\t\t\t"
+				resp_json += "{\n"
+				resp_json += "\t\t\t\t\"name\": \"" + i + "\",\n"
+				resp_json += "\t\t\t\t\"email\": \"" + author_email + "\",\n"
+				resp_json += "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
+				resp_json += "\t\t\t\t\"files\": [\n\t\t\t\t"
 				for j, entry in enumerate(responsibilities):
-					resp_xml += "{\n"
-					resp_xml += "\t\t\t\t\t\"name\": \"" + entry[1] + "\",\n"
-					resp_xml += "\t\t\t\t\t\"rows\": " + str(entry[0]) + "\n"
-					resp_xml += "\t\t\t\t},"
+					resp_json += "{\n"
+					resp_json += "\t\t\t\t\t\"name\": \"" + entry[1] + "\",\n"
+					resp_json += "\t\t\t\t\t\"rows\": " + str(entry[0]) + "\n"
+					resp_json += "\t\t\t\t},"
 					if j >= 9:
 						break
-				resp_xml = resp_xml[:-1]
-				resp_xml += "]\n"
-				resp_xml += "\t\t\t},"
+				resp_json = resp_json[:-1]
+				resp_json += "]\n\t\t\t},"
-		resp_xml = resp_xml[:-1]
-		print(",\n\t\t\"responsibilities\": {\n" + message_xml + "\t\t\t\"authors\": [\n\t\t\t" + resp_xml + "]\n\t\t}", end="")
+		resp_json = resp_json[:-1]
+		print(",\n\t\t\"responsibilities\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + resp_json + "]\n\t\t}", end="")
 	def output_xml(self):
 		message_xml = "\t\t<message>" + _(RESPONSIBILITIES_INFO_TEXT) + "</message>\n"


@@ -134,18 +134,18 @@ class TimelineOutput(Outputable):
 	def output_json(self):
 		if self.changes.get_commits():
-			message_xml = "\t\t\t\"message\": \"" + _(TIMELINE_INFO_TEXT) + "\",\n"
-			timeline_xml = ""
-			periods_xml = "\t\t\t\"period_length\": \"{0}\",\n".format("week" if self.useweeks else "month")
-			periods_xml += "\t\t\t\"periods\": [\n\t\t\t"
+			message_json = "\t\t\t\"message\": \"" + _(TIMELINE_INFO_TEXT) + "\",\n"
+			timeline_json = ""
+			periods_json = "\t\t\t\"period_length\": \"{0}\",\n".format("week" if self.useweeks else "month")
+			periods_json += "\t\t\t\"periods\": [\n\t\t\t"
 			timeline_data = timeline.TimelineData(self.changes, self.useweeks)
 			periods = timeline_data.get_periods()
 			names = timeline_data.get_authors()
 			for period in periods:
-				name_xml = "\t\t\t\t\"name\": \"" + str(period) + "\",\n"
-				authors_xml = "\t\t\t\t\"authors\": [\n\t\t\t\t"
+				name_json = "\t\t\t\t\"name\": \"" + str(period) + "\",\n"
+				authors_json = "\t\t\t\t\"authors\": [\n\t\t\t\t"
 				for name in names:
 					if timeline_data.is_author_in_period(period, name[0]):
@@ -156,21 +156,21 @@ class TimelineOutput(Outputable):
 						if len(signs_str) == 0:
 							signs_str = "."
-						authors_xml += "{\n\t\t\t\t\t\"name\": \"" + name[0] + "\",\n"
-						authors_xml += "\t\t\t\t\t\"email\": \"" + name[1] + "\",\n"
-						authors_xml += "\t\t\t\t\t\"gravatar\": \"" + gravatar.get_url(name[1]) + "\",\n"
-						authors_xml += "\t\t\t\t\t\"work\": \"" + signs_str + "\"\n\t\t\t\t},"
+						authors_json += "{\n\t\t\t\t\t\"name\": \"" + name[0] + "\",\n"
+						authors_json += "\t\t\t\t\t\"email\": \"" + name[1] + "\",\n"
+						authors_json += "\t\t\t\t\t\"gravatar\": \"" + gravatar.get_url(name[1]) + "\",\n"
+						authors_json += "\t\t\t\t\t\"work\": \"" + signs_str + "\"\n\t\t\t\t},"
 				else:
-					authors_xml = authors_xml[:-1]
+					authors_json = authors_json[:-1]
-					authors_xml += "],\n"
-				modified_rows_xml = "\t\t\t\t\"modified_rows\": " + \
+					authors_json += "],\n"
+				modified_rows_json = "\t\t\t\t\"modified_rows\": " + \
 					str(timeline_data.get_total_changes_in_period(period)[2]) + "\n"
-				timeline_xml += "{\n" + name_xml + authors_xml + modified_rows_xml + "\t\t\t},"
+				timeline_json += "{\n" + name_json + authors_json + modified_rows_json + "\t\t\t},"
 			else:
-				timeline_xml = timeline_xml[:-1]
+				timeline_json = timeline_json[:-1]
-			print(",\n\t\t\"timeline\": {\n" + message_xml + periods_xml + timeline_xml + "]\n\t\t}", end="")
+			print(",\n\t\t\"timeline\": {\n" + message_json + periods_json + timeline_json + "]\n\t\t}", end="")
 	def output_xml(self):
 		if self.changes.get_commits():