Massive linting and cleanup

This commit is contained in:
JP White 2021-02-27 22:39:08 -05:00
parent 2dd72c1d96
commit c17c5795ad
31 changed files with 2233 additions and 1822 deletions

View File

@ -26,14 +26,11 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
make requirements
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
make test-coverage
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
make lint
- name: Test with pytest
run: |
pytest
make test-coverage

View File

@ -35,9 +35,13 @@ clean-test: ## remove test and coverage artifacts
rm -fr .pytest_cache
lint: ## check style with flake8
black gitinspector --line-length 120
find . -name '*.py' -exec autopep8 -i {} --max-line-length=120 \;
flake8 gitinspector tests --max-line-length=120
# stop the build if there are Python syntax errors or undefined names
flake8 gitinspector tests --count --select=E9,F63,F7,F82 --show-source --statistics --builtins="_"
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 gitinspector tests --count --ignore=E722,W503,E401,C901 --exit-zero --max-complexity=10 --max-line-length=127 --statistics --builtins="_"
format: ## auto format all the code with black
black gitinspector --line-length 127
test: ## run tests quickly with the default Python
pytest

View File

@ -21,43 +21,45 @@ import os
import subprocess
import sys
def get_basedir():
if hasattr(sys, "frozen"): # exists when running via py2exe
return sys.prefix
else:
return os.path.dirname(os.path.realpath(__file__))
if hasattr(sys, "frozen"): # exists when running via py2exe
return sys.prefix
else:
return os.path.dirname(os.path.realpath(__file__))
def get_basedir_git(path=None):
previous_directory = None
previous_directory = None
if path != None:
previous_directory = os.getcwd()
os.chdir(path)
if path is not None:
previous_directory = os.getcwd()
os.chdir(path)
bare_command = subprocess.Popen(["git", "rev-parse", "--is-bare-repository"],
stdout=subprocess.PIPE, stderr=open(os.devnull, "w"))
bare_command = subprocess.Popen(
["git", "rev-parse", "--is-bare-repository"], stdout=subprocess.PIPE, stderr=open(os.devnull, "w")
)
isbare = bare_command.stdout.readlines()
bare_command.wait()
isbare = bare_command.stdout.readlines()
bare_command.wait()
if bare_command.returncode != 0:
sys.exit(_("Error processing git repository at \"%s\"." % os.getcwd()))
if bare_command.returncode != 0:
sys.exit(_('Error processing git repository at "%s".' % os.getcwd()))
isbare = (isbare[0].decode("utf-8", "replace").strip() == "true")
absolute_path = None
isbare = isbare[0].decode("utf-8", "replace").strip() == "true"
absolute_path = None
if isbare:
absolute_path = subprocess.Popen(["git", "rev-parse", "--git-dir"], stdout=subprocess.PIPE).stdout
else:
absolute_path = subprocess.Popen(["git", "rev-parse", "--show-toplevel"],
stdout=subprocess.PIPE).stdout
if isbare:
absolute_path = subprocess.Popen(["git", "rev-parse", "--git-dir"], stdout=subprocess.PIPE).stdout
else:
absolute_path = subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE).stdout
absolute_path = absolute_path.readlines()
absolute_path = absolute_path.readlines()
if len(absolute_path) == 0:
sys.exit(_("Unable to determine absolute path of git repository."))
if len(absolute_path) == 0:
sys.exit(_("Unable to determine absolute path of git repository."))
if path != None:
os.chdir(previous_directory)
if path is not None:
os.chdir(previous_directory)
return absolute_path[0].decode("utf-8", "replace").strip()
return absolute_path[0].decode("utf-8", "replace").strip()

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import datetime
import multiprocessing
import re
@ -30,172 +29,189 @@ from . import comment, extensions, filtering, format, interval, terminal
NUM_THREADS = multiprocessing.cpu_count()
class BlameEntry(object):
rows = 0
skew = 0 # Used when calculating average code age.
comments = 0
rows = 0
skew = 0 # Used when calculating average code age.
comments = 0
__thread_lock__ = threading.BoundedSemaphore(NUM_THREADS)
__blame_lock__ = threading.Lock()
AVG_DAYS_PER_MONTH = 30.4167
class BlameThread(threading.Thread):
def __init__(self, useweeks, changes, blame_command, extension, blames, filename):
__thread_lock__.acquire() # Lock controlling the number of threads running
threading.Thread.__init__(self)
def __init__(self, useweeks, changes, blame_command, extension, blames, filename):
__thread_lock__.acquire() # Lock controlling the number of threads running
threading.Thread.__init__(self)
self.useweeks = useweeks
self.changes = changes
self.blame_command = blame_command
self.extension = extension
self.blames = blames
self.filename = filename
self.useweeks = useweeks
self.changes = changes
self.blame_command = blame_command
self.extension = extension
self.blames = blames
self.filename = filename
self.is_inside_comment = False
self.is_inside_comment = False
def __clear_blamechunk_info__(self):
self.blamechunk_email = None
self.blamechunk_is_last = False
self.blamechunk_is_prior = False
self.blamechunk_revision = None
self.blamechunk_time = None
def __clear_blamechunk_info__(self):
self.blamechunk_email = None
self.blamechunk_is_last = False
self.blamechunk_is_prior = False
self.blamechunk_revision = None
self.blamechunk_time = None
def __handle_blamechunk_content__(self, content):
author = None
(comments, self.is_inside_comment) = comment.handle_comment_block(self.is_inside_comment, self.extension, content)
def __handle_blamechunk_content__(self, content):
author = None
(comments, self.is_inside_comment) = comment.handle_comment_block(self.is_inside_comment, self.extension, content)
if self.blamechunk_is_prior and interval.get_since():
return
try:
author = self.changes.get_latest_author_by_email(self.blamechunk_email)
except KeyError:
return
if self.blamechunk_is_prior and interval.get_since():
return
try:
author = self.changes.get_latest_author_by_email(self.blamechunk_email)
except KeyError:
return
if not filtering.set_filtered(author, "author") and not \
filtering.set_filtered(self.blamechunk_email, "email") and not \
filtering.set_filtered(self.blamechunk_revision, "revision"):
if (
not filtering.set_filtered(author, "author")
and not filtering.set_filtered(self.blamechunk_email, "email")
and not filtering.set_filtered(self.blamechunk_revision, "revision")
):
__blame_lock__.acquire() # Global lock used to protect calls from here...
__blame_lock__.acquire() # Global lock used to protect calls from here...
if self.blames.get((author, self.filename), None) == None:
self.blames[(author, self.filename)] = BlameEntry()
if self.blames.get((author, self.filename), None) is None:
self.blames[(author, self.filename)] = BlameEntry()
self.blames[(author, self.filename)].comments += comments
self.blames[(author, self.filename)].rows += 1
self.blames[(author, self.filename)].comments += comments
self.blames[(author, self.filename)].rows += 1
if (self.blamechunk_time - self.changes.first_commit_date).days > 0:
self.blames[(author, self.filename)].skew += ((self.changes.last_commit_date - self.blamechunk_time).days /
(7.0 if self.useweeks else AVG_DAYS_PER_MONTH))
if (self.blamechunk_time - self.changes.first_commit_date).days > 0:
self.blames[(author, self.filename)].skew += (self.changes.last_commit_date - self.blamechunk_time).days / (
7.0 if self.useweeks else AVG_DAYS_PER_MONTH
)
__blame_lock__.release() # ...to here.
__blame_lock__.release() # ...to here.
def run(self):
git_blame_r = subprocess.Popen(self.blame_command, stdout=subprocess.PIPE).stdout
rows = git_blame_r.readlines()
git_blame_r.close()
def run(self):
git_blame_r = subprocess.Popen(self.blame_command, stdout=subprocess.PIPE).stdout
rows = git_blame_r.readlines()
git_blame_r.close()
self.__clear_blamechunk_info__()
self.__clear_blamechunk_info__()
#pylint: disable=W0201
for j in range(0, len(rows)):
row = rows[j].decode("utf-8", "replace").strip()
keyval = row.split(" ", 2)
# pylint: disable=W0201
for j in range(0, len(rows)):
row = rows[j].decode("utf-8", "replace").strip()
keyval = row.split(" ", 2)
if self.blamechunk_is_last:
self.__handle_blamechunk_content__(row)
self.__clear_blamechunk_info__()
elif keyval[0] == "boundary":
self.blamechunk_is_prior = True
elif keyval[0] == "author-mail":
self.blamechunk_email = keyval[1].lstrip("<").rstrip(">")
elif keyval[0] == "author-time":
self.blamechunk_time = datetime.date.fromtimestamp(int(keyval[1]))
elif keyval[0] == "filename":
self.blamechunk_is_last = True
elif Blame.is_revision(keyval[0]):
self.blamechunk_revision = keyval[0]
if self.blamechunk_is_last:
self.__handle_blamechunk_content__(row)
self.__clear_blamechunk_info__()
elif keyval[0] == "boundary":
self.blamechunk_is_prior = True
elif keyval[0] == "author-mail":
self.blamechunk_email = keyval[1].lstrip("<").rstrip(">")
elif keyval[0] == "author-time":
self.blamechunk_time = datetime.date.fromtimestamp(int(keyval[1]))
elif keyval[0] == "filename":
self.blamechunk_is_last = True
elif Blame.is_revision(keyval[0]):
self.blamechunk_revision = keyval[0]
__thread_lock__.release() # Lock controlling the number of threads running
__thread_lock__.release() # Lock controlling the number of threads running
PROGRESS_TEXT = N_("Checking how many rows belong to each author (2 of 2): {0:.0f}%")
class Blame(object):
def __init__(self, repo, hard, useweeks, changes):
self.blames = {}
ls_tree_p = subprocess.Popen(["git", "ls-tree", "--name-only", "-r", interval.get_ref()],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
lines = ls_tree_p.communicate()[0].splitlines()
ls_tree_p.stdout.close()
def __init__(self, repo, hard, useweeks, changes):
self.blames = {}
ls_tree_p = subprocess.Popen(
["git", "ls-tree", "--name-only", "-r", interval.get_ref()], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
lines = ls_tree_p.communicate()[0].splitlines()
ls_tree_p.stdout.close()
if ls_tree_p.returncode == 0:
progress_text = _(PROGRESS_TEXT)
if ls_tree_p.returncode == 0:
progress_text = _(PROGRESS_TEXT)
if repo != None:
progress_text = "[%s] " % repo.name + progress_text
if repo is not None:
progress_text = "[%s] " % repo.name + progress_text
for i, row in enumerate(lines):
row = row.strip().decode("unicode_escape", "ignore")
row = row.encode("latin-1", "replace")
row = row.decode("utf-8", "replace").strip("\"").strip("'").strip()
for i, row in enumerate(lines):
row = row.strip().decode("unicode_escape", "ignore")
row = row.encode("latin-1", "replace")
row = row.decode("utf-8", "replace").strip('"').strip("'").strip()
if FileDiff.get_extension(row) in extensions.get_located() and \
FileDiff.is_valid_extension(row) and not filtering.set_filtered(FileDiff.get_filename(row)):
blame_command = [_f for _f in ["git", "blame", "--line-porcelain", "-w"] + \
(["-C", "-C", "-M"] if hard else []) +
[interval.get_since(), interval.get_ref(), "--", row] if _f]
thread = BlameThread(useweeks, changes, blame_command, FileDiff.get_extension(row),
self.blames, row.strip())
thread.daemon = True
thread.start()
if (
FileDiff.get_extension(row) in extensions.get_located()
and FileDiff.is_valid_extension(row)
and not filtering.set_filtered(FileDiff.get_filename(row))
):
blame_command = [
_f
for _f in ["git", "blame", "--line-porcelain", "-w"]
+ (["-C", "-C", "-M"] if hard else [])
+ [interval.get_since(), interval.get_ref(), "--", row]
if _f
]
thread = BlameThread(
useweeks, changes, blame_command, FileDiff.get_extension(row), self.blames, row.strip()
)
thread.daemon = True
thread.start()
if format.is_interactive_format():
terminal.output_progress(progress_text, i, len(lines))
if format.is_interactive_format():
terminal.output_progress(progress_text, i, len(lines))
# Make sure all threads have completed.
for i in range(0, NUM_THREADS):
__thread_lock__.acquire()
# Make sure all threads have completed.
for i in range(0, NUM_THREADS):
__thread_lock__.acquire()
# We also have to release them for future use.
for i in range(0, NUM_THREADS):
__thread_lock__.release()
# We also have to release them for future use.
for i in range(0, NUM_THREADS):
__thread_lock__.release()
def __iadd__(self, other):
try:
self.blames.update(other.blames)
return self;
except AttributeError:
return other;
def __iadd__(self, other):
try:
self.blames.update(other.blames)
return self
except AttributeError:
return other
@staticmethod
def is_revision(string):
revision = re.search("([0-9a-f]{40})", string)
@staticmethod
def is_revision(string):
revision = re.search("([0-9a-f]{40})", string)
if revision == None:
return False
if revision is None:
return False
return revision.group(1).strip()
return revision.group(1).strip()
@staticmethod
def get_stability(author, blamed_rows, changes):
if author in changes.get_authorinfo_list():
author_insertions = changes.get_authorinfo_list()[author].insertions
return 100 if author_insertions == 0 else 100.0 * blamed_rows / author_insertions
return 100
@staticmethod
def get_stability(author, blamed_rows, changes):
if author in changes.get_authorinfo_list():
author_insertions = changes.get_authorinfo_list()[author].insertions
return 100 if author_insertions == 0 else 100.0 * blamed_rows / author_insertions
return 100
@staticmethod
def get_time(string):
time = re.search(r" \(.*?(\d\d\d\d-\d\d-\d\d)", string)
return time.group(1).strip()
@staticmethod
def get_time(string):
time = re.search(r" \(.*?(\d\d\d\d-\d\d-\d\d)", string)
return time.group(1).strip()
def get_summed_blames(self):
summed_blames = {}
for i in list(self.blames.items()):
if summed_blames.get(i[0][0], None) == None:
summed_blames[i[0][0]] = BlameEntry()
def get_summed_blames(self):
summed_blames = {}
for i in list(self.blames.items()):
if summed_blames.get(i[0][0], None) is None:
summed_blames[i[0][0]] = BlameEntry()
summed_blames[i[0][0]].rows += i[1].rows
summed_blames[i[0][0]].skew += i[1].skew
summed_blames[i[0][0]].comments += i[1].comments
summed_blames[i[0][0]].rows += i[1].rows
summed_blames[i[0][0]].skew += i[1].skew
summed_blames[i[0][0]].comments += i[1].comments
return summed_blames
return summed_blames

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import bisect
import datetime
import multiprocessing
@ -34,260 +33,291 @@ NUM_THREADS = multiprocessing.cpu_count()
__thread_lock__ = threading.BoundedSemaphore(NUM_THREADS)
__changes_lock__ = threading.Lock()
class FileDiff(object):
def __init__(self, string):
commit_line = string.split("|")
def __init__(self, string):
commit_line = string.split("|")
if commit_line.__len__() == 2:
self.name = commit_line[0].strip()
self.insertions = commit_line[1].count("+")
self.deletions = commit_line[1].count("-")
if commit_line.__len__() == 2:
self.name = commit_line[0].strip()
self.insertions = commit_line[1].count("+")
self.deletions = commit_line[1].count("-")
@staticmethod
def is_filediff_line(string):
string = string.split("|")
return string.__len__() == 2 and string[1].find("Bin") == -1 and ('+' in string[1] or '-' in string[1])
@staticmethod
def is_filediff_line(string):
string = string.split("|")
return string.__len__() == 2 and string[1].find("Bin") == -1 and ("+" in string[1] or "-" in string[1])
@staticmethod
def get_extension(string):
string = string.split("|")[0].strip().strip("{}").strip("\"").strip("'")
return os.path.splitext(string)[1][1:]
@staticmethod
def get_extension(string):
string = string.split("|")[0].strip().strip("{}").strip('"').strip("'")
return os.path.splitext(string)[1][1:]
@staticmethod
def get_filename(string):
return string.split("|")[0].strip().strip("{}").strip("\"").strip("'")
@staticmethod
def get_filename(string):
return string.split("|")[0].strip().strip("{}").strip('"').strip("'")
@staticmethod
def is_valid_extension(string):
extension = FileDiff.get_extension(string)
@staticmethod
def is_valid_extension(string):
extension = FileDiff.get_extension(string)
for i in extensions.get():
if (extension == "" and i == "*") or extension == i or i == "**":
return True
return False
for i in extensions.get():
if (extension == "" and i == "*") or extension == i or i == '**':
return True
return False
class Commit(object):
def __init__(self, string):
self.filediffs = []
commit_line = string.split("|")
def __init__(self, string):
self.filediffs = []
commit_line = string.split("|")
if commit_line.__len__() == 5:
self.timestamp = commit_line[0]
self.date = commit_line[1]
self.sha = commit_line[2]
self.author = commit_line[3].strip()
self.email = commit_line[4].strip()
if commit_line.__len__() == 5:
self.timestamp = commit_line[0]
self.date = commit_line[1]
self.sha = commit_line[2]
self.author = commit_line[3].strip()
self.email = commit_line[4].strip()
def __lt__(self, other):
return self.timestamp.__lt__(other.timestamp) # only used for sorting; we just consider the timestamp.
def __lt__(self, other):
return self.timestamp.__lt__(other.timestamp) # only used for sorting; we just consider the timestamp.
def add_filediff(self, filediff):
self.filediffs.append(filediff)
def add_filediff(self, filediff):
self.filediffs.append(filediff)
def get_filediffs(self):
return self.filediffs
def get_filediffs(self):
return self.filediffs
@staticmethod
def get_author_and_email(string):
commit_line = string.split("|")
@staticmethod
def get_author_and_email(string):
commit_line = string.split("|")
if commit_line.__len__() == 5:
return (commit_line[3].strip(), commit_line[4].strip())
if commit_line.__len__() == 5:
return (commit_line[3].strip(), commit_line[4].strip())
@staticmethod
def is_commit_line(string):
return string.split("|").__len__() == 5
@staticmethod
def is_commit_line(string):
return string.split("|").__len__() == 5
class AuthorInfo(object):
email = None
insertions = 0
deletions = 0
commits = 0
email = None
insertions = 0
deletions = 0
commits = 0
class ChangesThread(threading.Thread):
def __init__(self, hard, changes, first_hash, second_hash, offset):
__thread_lock__.acquire() # Lock controlling the number of threads running
threading.Thread.__init__(self)
def __init__(self, hard, changes, first_hash, second_hash, offset):
__thread_lock__.acquire() # Lock controlling the number of threads running
threading.Thread.__init__(self)
self.hard = hard
self.changes = changes
self.first_hash = first_hash
self.second_hash = second_hash
self.offset = offset
self.hard = hard
self.changes = changes
self.first_hash = first_hash
self.second_hash = second_hash
self.offset = offset
@staticmethod
def create(hard, changes, first_hash, second_hash, offset):
thread = ChangesThread(hard, changes, first_hash, second_hash, offset)
thread.daemon = True
thread.start()
@staticmethod
def create(hard, changes, first_hash, second_hash, offset):
thread = ChangesThread(hard, changes, first_hash, second_hash, offset)
thread.daemon = True
thread.start()
def run(self):
git_log_r = subprocess.Popen([_f for _f in ["git", "log", "--reverse", "--pretty=%ct|%cd|%H|%aN|%aE",
"--stat=100000,8192", "--no-merges", "-w", interval.get_since(),
interval.get_until(), "--date=short"] + (["-C", "-C", "-M"] if self.hard else []) +
[self.first_hash + self.second_hash] if _f], stdout=subprocess.PIPE).stdout
lines = git_log_r.readlines()
git_log_r.close()
def run(self):
git_log_r = subprocess.Popen(
[
_f
for _f in [
"git",
"log",
"--reverse",
"--pretty=%ct|%cd|%H|%aN|%aE",
"--stat=100000,8192",
"--no-merges",
"-w",
interval.get_since(),
interval.get_until(),
"--date=short",
]
+ (["-C", "-C", "-M"] if self.hard else [])
+ [self.first_hash + self.second_hash]
if _f
],
stdout=subprocess.PIPE,
).stdout
lines = git_log_r.readlines()
git_log_r.close()
commit = None
found_valid_extension = False
is_filtered = False
commits = []
commit = None
found_valid_extension = False
is_filtered = False
commits = []
__changes_lock__.acquire() # Global lock used to protect calls from here...
__changes_lock__.acquire() # Global lock used to protect calls from here...
for i in lines:
j = i.strip().decode("unicode_escape", "ignore")
j = j.encode("latin-1", "replace")
j = j.decode("utf-8", "replace")
for i in lines:
j = i.strip().decode("unicode_escape", "ignore")
j = j.encode("latin-1", "replace")
j = j.decode("utf-8", "replace")
if Commit.is_commit_line(j):
(author, email) = Commit.get_author_and_email(j)
self.changes.emails_by_author[author] = email
self.changes.authors_by_email[email] = author
if Commit.is_commit_line(j):
(author, email) = Commit.get_author_and_email(j)
self.changes.emails_by_author[author] = email
self.changes.authors_by_email[email] = author
if Commit.is_commit_line(j) or i is lines[-1]:
if found_valid_extension:
bisect.insort(commits, commit)
if Commit.is_commit_line(j) or i is lines[-1]:
if found_valid_extension:
bisect.insort(commits, commit)
found_valid_extension = False
is_filtered = False
commit = Commit(j)
found_valid_extension = False
is_filtered = False
commit = Commit(j)
if Commit.is_commit_line(j) and \
(filtering.set_filtered(commit.author, "author") or \
filtering.set_filtered(commit.email, "email") or \
filtering.set_filtered(commit.sha, "revision") or \
filtering.set_filtered(commit.sha, "message")):
is_filtered = True
if Commit.is_commit_line(j) and (
filtering.set_filtered(commit.author, "author")
or filtering.set_filtered(commit.email, "email")
or filtering.set_filtered(commit.sha, "revision")
or filtering.set_filtered(commit.sha, "message")
):
is_filtered = True
if FileDiff.is_filediff_line(j) and not \
filtering.set_filtered(FileDiff.get_filename(j)) and not is_filtered:
extensions.add_located(FileDiff.get_extension(j))
if FileDiff.is_filediff_line(j) and not filtering.set_filtered(FileDiff.get_filename(j)) and not is_filtered:
extensions.add_located(FileDiff.get_extension(j))
if FileDiff.is_valid_extension(j):
found_valid_extension = True
filediff = FileDiff(j)
commit.add_filediff(filediff)
if FileDiff.is_valid_extension(j):
found_valid_extension = True
filediff = FileDiff(j)
commit.add_filediff(filediff)
self.changes.commits[self.offset // CHANGES_PER_THREAD] = commits
__changes_lock__.release() # ...to here.
__thread_lock__.release() # Lock controlling the number of threads running
self.changes.commits[self.offset // CHANGES_PER_THREAD] = commits
__changes_lock__.release() # ...to here.
__thread_lock__.release() # Lock controlling the number of threads running
PROGRESS_TEXT = N_("Fetching and calculating primary statistics (1 of 2): {0:.0f}%")
class Changes(object):
authors = {}
authors_dateinfo = {}
authors_by_email = {}
emails_by_author = {}
authors = {}
authors_dateinfo = {}
authors_by_email = {}
emails_by_author = {}
def __init__(self, repo, hard):
self.commits = []
interval.set_ref("HEAD");
git_rev_list_p = subprocess.Popen([_f for _f in ["git", "rev-list", "--reverse", "--no-merges",
interval.get_since(), interval.get_until(), "HEAD"] if _f],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
lines = git_rev_list_p.communicate()[0].splitlines()
git_rev_list_p.stdout.close()
def __init__(self, repo, hard):
self.commits = []
interval.set_ref("HEAD")
git_rev_list_p = subprocess.Popen(
[
_f
for _f in ["git", "rev-list", "--reverse", "--no-merges", interval.get_since(), interval.get_until(), "HEAD"]
if _f
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
lines = git_rev_list_p.communicate()[0].splitlines()
git_rev_list_p.stdout.close()
if git_rev_list_p.returncode == 0 and len(lines) > 0:
progress_text = _(PROGRESS_TEXT)
if repo != None:
progress_text = "[%s] " % repo.name + progress_text
if git_rev_list_p.returncode == 0 and len(lines) > 0:
progress_text = _(PROGRESS_TEXT)
if repo is not None:
progress_text = "[%s] " % repo.name + progress_text
chunks = len(lines) // CHANGES_PER_THREAD
self.commits = [None] * (chunks if len(lines) % CHANGES_PER_THREAD == 0 else chunks + 1)
first_hash = ""
chunks = len(lines) // CHANGES_PER_THREAD
self.commits = [None] * (chunks if len(lines) % CHANGES_PER_THREAD == 0 else chunks + 1)
first_hash = ""
for i, entry in enumerate(lines):
if i % CHANGES_PER_THREAD == CHANGES_PER_THREAD - 1:
entry = entry.decode("utf-8", "replace").strip()
second_hash = entry
ChangesThread.create(hard, self, first_hash, second_hash, i)
first_hash = entry + ".."
for i, entry in enumerate(lines):
if i % CHANGES_PER_THREAD == CHANGES_PER_THREAD - 1:
entry = entry.decode("utf-8", "replace").strip()
second_hash = entry
ChangesThread.create(hard, self, first_hash, second_hash, i)
first_hash = entry + ".."
if format.is_interactive_format():
terminal.output_progress(progress_text, i, len(lines))
else:
if CHANGES_PER_THREAD - 1 != i % CHANGES_PER_THREAD:
entry = entry.decode("utf-8", "replace").strip()
second_hash = entry
ChangesThread.create(hard, self, first_hash, second_hash, i)
if format.is_interactive_format():
terminal.output_progress(progress_text, i, len(lines))
else:
if CHANGES_PER_THREAD - 1 != i % CHANGES_PER_THREAD:
entry = entry.decode("utf-8", "replace").strip()
second_hash = entry
ChangesThread.create(hard, self, first_hash, second_hash, i)
# Make sure all threads have completed.
for i in range(0, NUM_THREADS):
__thread_lock__.acquire()
# Make sure all threads have completed.
for i in range(0, NUM_THREADS):
__thread_lock__.acquire()
# We also have to release them for future use.
for i in range(0, NUM_THREADS):
__thread_lock__.release()
# We also have to release them for future use.
for i in range(0, NUM_THREADS):
__thread_lock__.release()
self.commits = [item for sublist in self.commits for item in sublist]
self.commits = [item for sublist in self.commits for item in sublist]
if len(self.commits) > 0:
if interval.has_interval():
interval.set_ref(self.commits[-1].sha)
if len(self.commits) > 0:
if interval.has_interval():
interval.set_ref(self.commits[-1].sha)
self.first_commit_date = datetime.date(int(self.commits[0].date[0:4]), int(self.commits[0].date[5:7]),
int(self.commits[0].date[8:10]))
self.last_commit_date = datetime.date(int(self.commits[-1].date[0:4]), int(self.commits[-1].date[5:7]),
int(self.commits[-1].date[8:10]))
self.first_commit_date = datetime.date(
int(self.commits[0].date[0:4]), int(self.commits[0].date[5:7]), int(self.commits[0].date[8:10])
)
self.last_commit_date = datetime.date(
int(self.commits[-1].date[0:4]), int(self.commits[-1].date[5:7]), int(self.commits[-1].date[8:10])
)
def __iadd__(self, other):
try:
self.authors.update(other.authors)
self.authors_dateinfo.update(other.authors_dateinfo)
self.authors_by_email.update(other.authors_by_email)
self.emails_by_author.update(other.emails_by_author)
def __iadd__(self, other):
try:
self.authors.update(other.authors)
self.authors_dateinfo.update(other.authors_dateinfo)
self.authors_by_email.update(other.authors_by_email)
self.emails_by_author.update(other.emails_by_author)
for commit in other.commits:
bisect.insort(self.commits, commit)
if not self.commits and not other.commits:
self.commits = []
for commit in other.commits:
bisect.insort(self.commits, commit)
if not self.commits and not other.commits:
self.commits = []
return self
except AttributeError:
return other
return self
except AttributeError:
return other
def get_commits(self):
return self.commits
def get_commits(self):
return self.commits
@staticmethod
def modify_authorinfo(authors, key, commit):
if authors.get(key, None) == None:
authors[key] = AuthorInfo()
@staticmethod
def modify_authorinfo(authors, key, commit):
if authors.get(key, None) is None:
authors[key] = AuthorInfo()
if commit.get_filediffs():
authors[key].commits += 1
if commit.get_filediffs():
authors[key].commits += 1
for j in commit.get_filediffs():
authors[key].insertions += j.insertions
authors[key].deletions += j.deletions
for j in commit.get_filediffs():
authors[key].insertions += j.insertions
authors[key].deletions += j.deletions
def get_authorinfo_list(self):
if not self.authors:
for i in self.commits:
Changes.modify_authorinfo(self.authors, i.author, i)
def get_authorinfo_list(self):
if not self.authors:
for i in self.commits:
Changes.modify_authorinfo(self.authors, i.author, i)
return self.authors
return self.authors
def get_authordateinfo_list(self):
if not self.authors_dateinfo:
for i in self.commits:
Changes.modify_authorinfo(self.authors_dateinfo, (i.date, i.author), i)
def get_authordateinfo_list(self):
if not self.authors_dateinfo:
for i in self.commits:
Changes.modify_authorinfo(self.authors_dateinfo, (i.date, i.author), i)
return self.authors_dateinfo
return self.authors_dateinfo
def get_latest_author_by_email(self, name):
if not hasattr(name, "decode"):
name = str.encode(name)
try:
name = name.decode("unicode_escape", "ignore")
except UnicodeEncodeError:
pass
def get_latest_author_by_email(self, name):
if not hasattr(name, "decode"):
name = str.encode(name)
try:
name = name.decode("unicode_escape", "ignore")
except UnicodeEncodeError:
pass
return self.authors_by_email[name]
return self.authors_by_email[name]
def get_latest_email_by_author(self, name):
return self.emails_by_author[name]
def get_latest_email_by_author(self, name):
return self.emails_by_author[name]

View File

@ -25,34 +25,41 @@ import sys
import tempfile
try:
from urllib.parse import urlparse
from urllib.parse import urlparse
except:
from urllib.parse import urlparse
from urllib.parse import urlparse
__cloned_paths__ = []
def create(url):
class Repository(object):
def __init__(self, name, location):
self.name = name
self.location = location
class Repository(object):
def __init__(self, name, location):
self.name = name
self.location = location
parsed_url = urlparse(url)
parsed_url = urlparse(url)
if parsed_url.scheme == "file" or parsed_url.scheme == "git" or parsed_url.scheme == "http" or \
parsed_url.scheme == "https" or parsed_url.scheme == "ssh":
path = tempfile.mkdtemp(suffix=".gitinspector")
git_clone = subprocess.Popen(["git", "clone", url, path], stdout=sys.stderr)
git_clone.wait()
if (
parsed_url.scheme == "file"
or parsed_url.scheme == "git"
or parsed_url.scheme == "http"
or parsed_url.scheme == "https"
or parsed_url.scheme == "ssh"
):
path = tempfile.mkdtemp(suffix=".gitinspector")
git_clone = subprocess.Popen(["git", "clone", url, path], stdout=sys.stderr)
git_clone.wait()
if git_clone.returncode != 0:
sys.exit(git_clone.returncode)
if git_clone.returncode != 0:
sys.exit(git_clone.returncode)
__cloned_paths__.append(path)
return Repository(os.path.basename(parsed_url.path), path)
__cloned_paths__.append(path)
return Repository(os.path.basename(parsed_url.path), path)
return Repository(None, os.path.abspath(url))
return Repository(None, os.path.abspath(url))
def delete():
for path in __cloned_paths__:
shutil.rmtree(path, ignore_errors=True)
for path in __cloned_paths__:
shutil.rmtree(path, ignore_errors=True)

View File

@ -18,61 +18,139 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
__comment_begining__ = {
"java": "/*",
"c": "/*",
"cc": "/*",
"cpp": "/*",
"cs": "/*",
"h": "/*",
"hh": "/*",
"hpp": "/*",
"hs": "{-",
"html": "<!--",
"php": "/*",
"py": '"""',
"glsl": "/*",
"rb": "=begin",
"js": "/*",
"jspx": "<!--",
"scala": "/*",
"sql": "/*",
"tex": "\\begin{comment}",
"xhtml": "<!--",
"xml": "<!--",
"ml": "(*",
"mli": "(*",
"go": "/*",
"ly": "%{",
"ily": "%{",
}
__comment_begining__ = {"java": "/*", "c": "/*", "cc": "/*", "cpp": "/*", "cs": "/*", "h": "/*", "hh": "/*", "hpp": "/*",
"hs": "{-", "html": "<!--", "php": "/*", "py": "\"\"\"", "glsl": "/*", "rb": "=begin", "js": "/*",
"jspx": "<!--", "scala": "/*", "sql": "/*", "tex": "\\begin{comment}", "xhtml": "<!--",
"xml": "<!--", "ml": "(*", "mli": "(*", "go": "/*", "ly": "%{", "ily": "%{"}
__comment_end__ = {
"java": "*/",
"c": "*/",
"cc": "*/",
"cpp": "*/",
"cs": "*/",
"h": "*/",
"hh": "*/",
"hpp": "*/",
"hs": "-}",
"html": "-->",
"php": "*/",
"py": '"""',
"glsl": "*/",
"rb": "=end",
"js": "*/",
"jspx": "-->",
"scala": "*/",
"sql": "*/",
"tex": "\\end{comment}",
"xhtml": "-->",
"xml": "-->",
"ml": "*)",
"mli": "*)",
"go": "*/",
"ly": "%}",
"ily": "%}",
}
__comment_end__ = {"java": "*/", "c": "*/", "cc": "*/", "cpp": "*/", "cs": "*/", "h": "*/", "hh": "*/", "hpp": "*/", "hs": "-}",
"html": "-->", "php": "*/", "py": "\"\"\"", "glsl": "*/", "rb": "=end", "js": "*/", "jspx": "-->",
"scala": "*/", "sql": "*/", "tex": "\\end{comment}", "xhtml": "-->", "xml": "-->", "ml": "*)", "mli": "*)",
"go": "*/", "ly": "%}", "ily": "%}"}
__comment__ = {"java": "//", "c": "//", "cc": "//", "cpp": "//", "cs": "//", "h": "//", "hh": "//", "hpp": "//", "hs": "--",
"pl": "#", "php": "//", "py": "#", "glsl": "//", "rb": "#", "robot": "#", "rs": "//", "rlib": "//", "js": "//",
"scala": "//", "sql": "--", "tex": "%", "ada": "--", "ads": "--", "adb": "--", "pot": "#", "po": "#", "go": "//",
"ly": "%", "ily": "%"}
__comment__ = {
"java": "//",
"c": "//",
"cc": "//",
"cpp": "//",
"cs": "//",
"h": "//",
"hh": "//",
"hpp": "//",
"hs": "--",
"pl": "#",
"php": "//",
"py": "#",
"glsl": "//",
"rb": "#",
"robot": "#",
"rs": "//",
"rlib": "//",
"js": "//",
"scala": "//",
"sql": "--",
"tex": "%",
"ada": "--",
"ads": "--",
"adb": "--",
"pot": "#",
"po": "#",
"go": "//",
"ly": "%",
"ily": "%",
}
__comment_markers_must_be_at_begining__ = {"tex": True}
def __has_comment_begining__(extension, string):
    """Return True when *string* opens a block comment for *extension*.

    The second branch also requires that no closing marker appears after
    position 2, i.e. the comment does not close on the same line.

    Defect fixed: this function was defined twice in a row (pre-black copy
    using ``== True``/``!= None`` followed by the formatted copy); only the
    idiomatic copy is kept.
    """
    if __comment_markers_must_be_at_begining__.get(extension, None):
        # Markers like TeX's \begin{comment} only count at column 0.
        return string.find(__comment_begining__[extension]) == 0
    elif __comment_begining__.get(extension, None) is not None and string.find(__comment_end__[extension], 2) == -1:
        return string.find(__comment_begining__[extension]) != -1

    return False
def __has_comment_end__(extension, string):
    """Return True when *string* contains the block-comment closing marker
    for *extension* (at column 0 only, for extensions listed in
    __comment_markers_must_be_at_begining__).

    Defect fixed: the old and reformatted bodies were interleaved inside one
    definition, leaving unreachable duplicate branches; deduplicated.
    """
    if __comment_markers_must_be_at_begining__.get(extension, None):
        return string.find(__comment_end__[extension]) == 0
    elif __comment_end__.get(extension, None) is not None:
        return string.find(__comment_end__[extension]) != -1

    return False
def is_comment(extension, string):
    """Return True when the stripped *string* starts or ends with any comment
    marker (block begin, block end, or single-line) known for *extension*.

    Defect fixed: the function body contained two interleaved copies of the
    same checks (pre/post formatting); the duplicates are removed.
    """
    if __comment_begining__.get(extension, None) is not None and string.strip().startswith(__comment_begining__[extension]):
        return True

    if __comment_end__.get(extension, None) is not None and string.strip().endswith(__comment_end__[extension]):
        return True

    if __comment__.get(extension, None) is not None and string.strip().startswith(__comment__[extension]):
        return True

    return False
def handle_comment_block(is_inside_comment, extension, content):
    """Update block-comment state for one line of source.

    Returns a tuple ``(comments, is_inside_comment)`` where *comments* is the
    number of comment lines counted for *content* (0, 1 or 2 — a line can be
    counted both as a comment line and as part of an open block, preserving
    the original counting behaviour) and *is_inside_comment* is the state to
    carry to the next line.

    Defect fixed: the body appeared twice back to back (diff artifact);
    deduplicated.
    """
    comments = 0

    if is_comment(extension, content):
        comments += 1
    if is_inside_comment:
        if __has_comment_end__(extension, content):
            is_inside_comment = False
        else:
            comments += 1
    elif __has_comment_begining__(extension, content) and not __has_comment_end__(extension, content):
        is_inside_comment = True

    return (comments, is_inside_comment)

View File

@ -22,72 +22,75 @@ import os
import subprocess
from . import extensions, filtering, format, interval, optval
class GitConfig(object):
    """Reads gitinspector settings stored in git config ("inspector.*"
    variables) and applies them to a Runner instance.

    Defect fixed: every method was defined twice (pre-black copy immediately
    followed by the formatted copy); only one copy of each is kept.
    """

    def __init__(self, run, repo, global_only=False):
        # run: the Runner whose flags read() will populate.
        # repo: path of the repository whose config is consulted.
        # global_only: when True, only read the --global git config.
        self.run = run
        self.repo = repo
        self.global_only = global_only

    def __read_git_config__(self, variable):
        """Return the raw string value of git config "inspector.<variable>",
        or "" when it is unset."""
        previous_directory = os.getcwd()
        os.chdir(self.repo)
        # The list comprehension drops the empty string when global_only is False.
        setting = subprocess.Popen(
            [_f for _f in ["git", "config", "--global" if self.global_only else "", "inspector." + variable] if _f],
            stdout=subprocess.PIPE,
        ).stdout
        os.chdir(previous_directory)

        try:
            setting = setting.readlines()[0]
            setting = setting.decode("utf-8", "replace").strip()
        except IndexError:
            # No output from git config means the variable is not set.
            setting = ""

        return setting

    def __read_git_config_bool__(self, variable):
        """Return the variable interpreted as a boolean; False when unset or invalid."""
        try:
            variable = self.__read_git_config__(variable)
            return optval.get_boolean_argument(False if variable == "" else variable)
        except optval.InvalidOptionArgument:
            return False

    def __read_git_config_string__(self, variable):
        """Return (True, value) when the variable is set and non-empty, else (False, None)."""
        string = self.__read_git_config__(variable)
        return (True, string) if len(string) > 0 else (False, None)

    def read(self):
        """Read all recognised "inspector.*" settings and apply them to self.run.

        Raises format.InvalidFormatError when "inspector.format" names an
        unsupported output format.
        """
        var = self.__read_git_config_string__("file-types")
        if var[0]:
            extensions.define(var[1])

        var = self.__read_git_config_string__("exclude")
        if var[0]:
            filtering.add(var[1])

        var = self.__read_git_config_string__("format")
        if var[0] and not format.select(var[1]):
            raise format.InvalidFormatError(_("specified output format not supported."))

        self.run.hard = self.__read_git_config_bool__("hard")
        self.run.list_file_types = self.__read_git_config_bool__("list-file-types")
        self.run.localize_output = self.__read_git_config_bool__("localize-output")
        self.run.metrics = self.__read_git_config_bool__("metrics")
        self.run.responsibilities = self.__read_git_config_bool__("responsibilities")
        self.run.useweeks = self.__read_git_config_bool__("weeks")

        var = self.__read_git_config_string__("since")
        if var[0]:
            interval.set_since(var[1])

        var = self.__read_git_config_string__("until")
        if var[0]:
            interval.set_until(var[1])

        self.run.timeline = self.__read_git_config_bool__("timeline")

        # "grading" is a meta-option that switches on the full report set.
        if self.__read_git_config_bool__("grading"):
            self.run.hard = True
            self.run.list_file_types = True
            self.run.metrics = True
            self.run.responsibilities = True
            self.run.timeline = True
            self.run.useweeks = True

View File

@ -18,24 +18,27 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
DEFAULT_EXTENSIONS = ["java", "c", "cc", "cpp", "h", "hh", "hpp", "py", "glsl", "rb", "js", "sql"]

# Currently selected extensions, and the set of extensions actually seen while
# scanning the repository. Defect fixed throughout this module: each function
# was defined twice (pre/post formatting copies); deduplicated.
__extensions__ = DEFAULT_EXTENSIONS
__located_extensions__ = set()


def get():
    """Return the list of file extensions currently being inspected."""
    return __extensions__


def define(string):
    """Replace the inspected extensions with the comma-separated list in *string*."""
    global __extensions__
    __extensions__ = string.split(",")


def add_located(string):
    """Record an extension encountered in the repository ("*" for files without one)."""
    if len(string) == 0:
        __located_extensions__.add("*")
    else:
        __located_extensions__.add(string)


def get_located():
    """Return the set of extensions encountered so far."""
    return __located_extensions__

View File

@ -21,69 +21,84 @@
import re
import subprocess
# Filter rules and matches keyed by filter type. Each value is a pair:
# [0] = configured regular expressions, [1] = entries filtered out so far
# ("message" filters map onto revisions, hence None).
# Defect fixed throughout this module: every definition appeared twice
# (pre/post formatting copies); deduplicated. Additionally, the bare
# "except:" around re.search is narrowed to re.error so that only genuinely
# invalid patterns are reported as InvalidRegExpError.
__filters__ = {
    "file": [set(), set()],
    "author": [set(), set()],
    "email": [set(), set()],
    "revision": [set(), set()],
    "message": [set(), None],
}


class InvalidRegExpError(ValueError):
    """Raised when a filter pattern is not a valid regular expression."""

    def __init__(self, msg):
        super(InvalidRegExpError, self).__init__(msg)
        self.msg = msg


def get():
    """Return the raw filter table."""
    return __filters__


def __add_one__(string):
    """Add one rule; a "type:" prefix selects the filter type, default is "file"."""
    for i in __filters__:
        if (i + ":").lower() == string[0:len(i) + 1].lower():
            __filters__[i][0].add(string[len(i) + 1:])
            return
    __filters__["file"][0].add(string)


def add(string):
    """Add a comma-separated list of filter rules."""
    rules = string.split(",")
    for rule in rules:
        __add_one__(rule)


def clear():
    """Drop all configured patterns (matched entries in [1] are kept)."""
    for i in __filters__:
        __filters__[i][0] = set()


def get_filered(filter_type="file"):
    """Return the entries filtered out so far for *filter_type*.

    NOTE(review): name keeps the historical typo ("filered") since callers
    depend on it.
    """
    return __filters__[filter_type][1]


def has_filtered():
    """Return True when any filter type has matched at least one entry."""
    for i in __filters__:
        if __filters__[i][1]:
            return True
    return False


def __find_commit_message__(sha):
    """Return the commit message of *sha* via `git show`, decoded leniently."""
    git_show_r = subprocess.Popen(
        [_f for _f in ["git", "show", "-s", "--pretty=%B", "-w", sha] if _f], stdout=subprocess.PIPE
    ).stdout

    commit_message = git_show_r.read()
    git_show_r.close()

    commit_message = commit_message.strip().decode("unicode_escape", "ignore")
    commit_message = commit_message.encode("latin-1", "replace")
    return commit_message.decode("utf-8", "replace")


def set_filtered(string, filter_type="file"):
    """Test *string* against the configured patterns of *filter_type*.

    On a match the entry is recorded (message matches are recorded as
    revision filters) and True is returned; otherwise False.

    Raises InvalidRegExpError when a configured pattern is not a valid
    regular expression.
    """
    string = string.strip()

    if len(string) > 0:
        for i in __filters__[filter_type][0]:
            search_for = string

            if filter_type == "message":
                search_for = __find_commit_message__(string)
            try:
                if re.search(i, search_for) is not None:
                    if filter_type == "message":
                        __add_one__("revision:" + string)
                    else:
                        __filters__[filter_type][1].add(string)
                    return True
            except re.error:
                raise InvalidRegExpError(_("invalid regular expression specified"))
    return False

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import base64
import os
import textwrap
@ -33,122 +32,142 @@ DEFAULT_FORMAT = __available_formats__[3]
__selected_format__ = DEFAULT_FORMAT
class InvalidFormatError(Exception):
    """Raised when an unsupported output format is requested.

    Defect fixed: the class defined __init__ twice (pre/post formatting
    copies); deduplicated.
    """

    def __init__(self, msg):
        super(InvalidFormatError, self).__init__(msg)
        # Kept as an attribute because callers print exception.msg directly.
        self.msg = msg
def select(format):
    """Make *format* the active output format.

    Returns True when it is one of __available_formats__. NOTE(review): the
    selection is stored even when invalid, so callers must honour the return
    value — preserved as-is.

    Defect fixed here and in the two getters below: each function was
    defined twice (pre/post formatting copies); deduplicated.
    """
    global __selected_format__
    __selected_format__ = format

    return format in __available_formats__


def get_selected():
    """Return the currently selected output format name."""
    return __selected_format__


def is_interactive_format():
    """Return True when output goes to an interactive (plain text) terminal format."""
    return __selected_format__ == "text"
def __output_html_template__(name):
    """Read an HTML template file and return its contents as a unicode string.

    Defect fixed: duplicated definition (pre/post formatting copies) and a
    manually closed file handle; now uses a context manager so the file is
    closed even if decoding raises.
    """
    template_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), name)
    with open(template_path, "rb") as file_r:
        return file_r.read().decode("utf-8", "replace")


def __get_zip_file_content__(name, file_name="/html/flot.zip"):
    """Extract entry *name* from a zip archive shipped with gitinspector and
    return it as a unicode string. Same dedup + context-manager fix as above."""
    with zipfile.ZipFile(basedir.get_basedir() + file_name, "r") as zip_file:
        return zip_file.read(name).decode("utf-8", "replace")
INFO_ONE_REPOSITORY = N_("Statistical information for the repository '{0}' was gathered on {1}.")
INFO_MANY_REPOSITORIES = N_("Statistical information for the repositories '{0}' was gathered on {1}.")
def output_header(repos):
    """Print the report header for the selected output format.

    For html/htmlembedded this assembles the full HTML head (templates,
    bundled JS, base64 logo); for json/xml it opens the respective document;
    otherwise a plain-text summary line is printed.

    Defect fixed: the function contained two interleaved copies of its body
    (pre/post black formatting); deduplicated to the formatted copy.
    """
    repos_string = ", ".join([repo.name for repo in repos])

    if __selected_format__ == "html" or __selected_format__ == "htmlembedded":
        base = basedir.get_basedir()
        html_header = __output_html_template__(base + "/html/html.header")
        tablesorter_js = __get_zip_file_content__("jquery.tablesorter.min.js", "/html/jquery.tablesorter.min.js.zip").encode(
            "latin-1", "replace"
        )
        tablesorter_js = tablesorter_js.decode("utf-8", "ignore")
        flot_js = __get_zip_file_content__("jquery.flot.js")
        pie_js = __get_zip_file_content__("jquery.flot.pie.js")
        resize_js = __get_zip_file_content__("jquery.flot.resize.js")

        logo_file = open(base + "/html/gitinspector_piclet.png", "rb")
        logo = logo_file.read()
        logo_file.close()
        logo = base64.b64encode(logo)

        # htmlembedded inlines jquery; plain html loads it from the CDN.
        if __selected_format__ == "htmlembedded":
            jquery_js = ">" + __get_zip_file_content__("jquery.js")
        else:
            jquery_js = ' src="https://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js">'

        print(
            html_header.format(
                title=_("Repository statistics for '{0}'").format(repos_string),
                jquery=jquery_js,
                jquery_tablesorter=tablesorter_js,
                jquery_flot=flot_js,
                jquery_flot_pie=pie_js,
                jquery_flot_resize=resize_js,
                logo=logo.decode("utf-8", "replace"),
                logo_text=_(
                    "The output has been generated by {0} {1}. The statistical analysis tool" " for git repositories."
                ).format('<a href="https://github.com/ejwa/gitinspector">gitinspector</a>', version.__version__),
                repo_text=_(INFO_ONE_REPOSITORY if len(repos) <= 1 else INFO_MANY_REPOSITORIES).format(
                    repos_string, localization.get_date()
                ),
                show_minor_authors=_("Show minor authors"),
                hide_minor_authors=_("Hide minor authors"),
                show_minor_rows=_("Show rows with minor work"),
                hide_minor_rows=_("Hide rows with minor work"),
            )
        )
    elif __selected_format__ == "json":
        print('{\n\t"gitinspector": {')
        print('\t\t"version": "' + version.__version__ + '",')

        if len(repos) <= 1:
            print('\t\t"repository": "' + repos_string + '",')
        else:
            repos_json = '\t\t"repositories": [ '

            for repo in repos:
                repos_json += '"' + repo.name + '", '

            print(repos_json[:-2] + " ],")

        print('\t\t"report_date": "' + time.strftime("%Y/%m/%d") + '",')
    elif __selected_format__ == "xml":
        print("<gitinspector>")
        print("\t<version>" + version.__version__ + "</version>")

        if len(repos) <= 1:
            print("\t<repository>" + repos_string + "</repository>")
        else:
            print("\t<repositories>")

            for repo in repos:
                print("\t\t<repository>" + repo.name + "</repository>")

            print("\t</repositories>")

        print("\t<report-date>" + time.strftime("%Y/%m/%d") + "</report-date>")
    else:
        print(
            textwrap.fill(
                _(INFO_ONE_REPOSITORY if len(repos) <= 1 else INFO_MANY_REPOSITORIES).format(
                    repos_string, localization.get_date()
                ),
                width=terminal.get_size()[0],
            )
        )
def output_footer():
    """Print the closing part of the report matching output_header().

    Defect fixed: the whole body appeared twice back to back (diff artifact);
    deduplicated.
    """
    if __selected_format__ == "html" or __selected_format__ == "htmlembedded":
        base = basedir.get_basedir()
        html_footer = __output_html_template__(base + "/html/html.footer")
        print(html_footer)
    elif __selected_format__ == "json":
        print("\n\t}\n}")
    elif __selected_format__ == "xml":
        print("</gitinspector>")

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import atexit
import getopt
import os
@ -27,8 +26,7 @@ from .blame import Blame
from .changes import Changes
from .config import GitConfig
from .metrics import MetricsLogic
from . import (basedir, clone, extensions, filtering, format, help, interval,
localization, optval, terminal, version)
from . import basedir, clone, extensions, filtering, format, help, interval, localization, optval, terminal, version
from .output import outputable
from .output.blameoutput import BlameOutput
from .output.changesoutput import ChangesOutput
@ -40,179 +38,202 @@ from .output.timelineoutput import TimelineOutput
localization.init()
class Runner(object):
    """Holds the run options and drives the full analysis over one or more
    repositories.

    Defect fixed: __init__ and process() each appeared twice with their old
    and reformatted bodies interleaved (diff artifact); deduplicated.
    """

    def __init__(self):
        # All report options default to off; they are switched on by command
        # line options or git config (see GitConfig.read and main()).
        self.hard = False
        self.include_metrics = False
        self.list_file_types = False
        self.localize_output = False
        self.responsibilities = False
        self.grading = False
        self.timeline = False
        self.useweeks = False

    def process(self, repos):
        """Analyse *repos*, accumulate the results and emit the report."""
        localization.check_compatibility(version.__version__)

        if not self.localize_output:
            localization.disable()

        terminal.skip_escapes(not sys.stdout.isatty())
        terminal.set_stdout_encoding()
        previous_directory = os.getcwd()
        # Accumulators start as bare instances; each repo's results are
        # merged in via the classes' __add__ implementations.
        summed_blames = Blame.__new__(Blame)
        summed_changes = Changes.__new__(Changes)
        summed_metrics = MetricsLogic.__new__(MetricsLogic)

        for repo in repos:
            os.chdir(repo.location)
            # Only tag results with the repo when merging multiple repos.
            repo = repo if len(repos) > 1 else None
            changes = Changes(repo, self.hard)
            summed_blames += Blame(repo, self.hard, self.useweeks, changes)
            summed_changes += changes

            if self.include_metrics:
                summed_metrics += MetricsLogic()

            if sys.stdout.isatty() and format.is_interactive_format():
                terminal.clear_row()
        else:
            # for/else: runs when the loop completes without break.
            os.chdir(previous_directory)

        format.output_header(repos)
        outputable.output(ChangesOutput(summed_changes))

        if summed_changes.get_commits():
            outputable.output(BlameOutput(summed_changes, summed_blames))

            if self.timeline:
                outputable.output(TimelineOutput(summed_changes, self.useweeks))

            if self.include_metrics:
                outputable.output(MetricsOutput(summed_metrics))

            if self.responsibilities:
                outputable.output(ResponsibilitiesOutput(summed_changes, summed_blames))

        outputable.output(FilteringOutput())

        if self.list_file_types:
            outputable.output(ExtensionsOutput())

        format.output_footer()
        os.chdir(previous_directory)
def __check_python_version__():
    """Exit with an error message when running on a Python older than 2.6.

    Defect fixed: the body appeared twice (diff artifact); deduplicated.
    """
    if sys.version_info < (2, 6):
        python_version = str(sys.version_info[0]) + "." + str(sys.version_info[1])
        sys.exit(_("gitinspector requires at least Python 2.6 to run (version {0} was found).").format(python_version))
def __get_validated_git_repos__(repos_relative):
    """Clone/validate each requested repository and return Repository objects.

    Defects fixed: duplicated body (diff artifact), and the empty-input
    default was the string "." — which only worked because iterating a
    one-character string yields "."; now an explicit one-element list.
    """
    if not repos_relative:
        repos_relative = ["."]

    repos = []

    # Try to clone the repos or return the same directory and bail out.
    for repo in repos_relative:
        cloned_repo = clone.create(repo)

        if cloned_repo.name is None:
            cloned_repo.location = basedir.get_basedir_git(cloned_repo.location)
            cloned_repo.name = os.path.basename(cloned_repo.location)

        repos.append(cloned_repo)

    return repos
def main():
    """Entry point: parse the command line, apply git-config defaults, and
    run the analysis.

    Defect fixed: the whole function contained two interleaved copies of its
    body (pre/post black formatting); deduplicated to the formatted copy.
    """
    terminal.check_terminal_encoding()
    terminal.set_stdin_encoding()
    argv = terminal.convert_command_line_to_utf8()
    run = Runner()
    repos = []

    try:
        opts, args = optval.gnu_getopt(
            argv[1:],
            "f:F:hHlLmrTwx:",
            [
                "exclude=",
                "file-types=",
                "format=",
                "hard:true",
                "help",
                "list-file-types:true",
                "localize-output:true",
                "metrics:true",
                "responsibilities:true",
                "since=",
                "grading:true",
                "timeline:true",
                "until=",
                "version",
                "weeks:true",
            ],
        )
        repos = __get_validated_git_repos__(set(args))

        # We need the repos above to be set before we read the git config.
        GitConfig(run, repos[-1].location).read()
        # The first -x/--exclude on the command line replaces (rather than
        # extends) whatever the git config configured.
        clear_x_on_next_pass = True

        for o, a in opts:
            if o in ("-h", "--help"):
                help.output()
                sys.exit(0)
            elif o in ("-f", "--file-types"):
                extensions.define(a)
            elif o in ("-F", "--format"):
                if not format.select(a):
                    raise format.InvalidFormatError(_("specified output format not supported."))
            elif o == "-H":
                run.hard = True
            elif o == "--hard":
                run.hard = optval.get_boolean_argument(a)
            elif o == "-l":
                run.list_file_types = True
            elif o == "--list-file-types":
                run.list_file_types = optval.get_boolean_argument(a)
            elif o == "-L":
                run.localize_output = True
            elif o == "--localize-output":
                run.localize_output = optval.get_boolean_argument(a)
            elif o == "-m":
                run.include_metrics = True
            elif o == "--metrics":
                run.include_metrics = optval.get_boolean_argument(a)
            elif o == "-r":
                run.responsibilities = True
            elif o == "--responsibilities":
                run.responsibilities = optval.get_boolean_argument(a)
            elif o == "--since":
                interval.set_since(a)
            elif o == "--version":
                version.output()
                sys.exit(0)
            elif o == "--grading":
                # Meta-option: toggles the full report set at once.
                grading = optval.get_boolean_argument(a)
                run.include_metrics = grading
                run.list_file_types = grading
                run.responsibilities = grading
                run.grading = grading
                run.hard = grading
                run.timeline = grading
                run.useweeks = grading
            elif o == "-T":
                run.timeline = True
            elif o == "--timeline":
                run.timeline = optval.get_boolean_argument(a)
            elif o == "--until":
                interval.set_until(a)
            elif o == "-w":
                run.useweeks = True
            elif o == "--weeks":
                run.useweeks = optval.get_boolean_argument(a)
            elif o in ("-x", "--exclude"):
                if clear_x_on_next_pass:
                    clear_x_on_next_pass = False
                    filtering.clear()
                filtering.add(a)

        __check_python_version__()
        run.process(repos)
    except (filtering.InvalidRegExpError, format.InvalidFormatError, optval.InvalidOptionArgument, getopt.error) as exception:
        print(sys.argv[0], "\b:", exception.msg, file=sys.stderr)
        print(_("Try `{0} --help' for more information.").format(sys.argv[0]), file=sys.stderr)
        sys.exit(2)
@atexit.register
def cleanup():
    # Remove any repositories cloned into temporary directories during this run.
    clone.delete()


if __name__ == "__main__":
    main()

View File

@ -21,20 +21,21 @@
import hashlib
try:
from urllib.parse import urlencode
from urllib.parse import urlencode
except:
from urllib.parse import urlencode
from urllib.parse import urlencode
from . import format
def get_url(email, size=20):
    """Return a Gravatar URL for *email*, with parameters adapted to the
    currently selected output format (identicon default; *size* only for html).

    Defects fixed: duplicated body (diff artifact), and a crash for formats
    other than html/xml/json — params stayed None and urlencode(None) raised
    TypeError; the bare avatar URL is now returned in that case.
    """
    md5hash = hashlib.md5(email.encode("utf-8").lower().strip()).hexdigest()
    base_url = "https://www.gravatar.com/avatar/" + md5hash
    params = None

    if format.get_selected() == "html":
        params = {"default": "identicon", "size": size}
    elif format.get_selected() == "xml" or format.get_selected() == "json":
        params = {"default": "identicon"}

    if params is None:
        # Text-like formats get the plain avatar URL without query parameters.
        return base_url

    return base_url + "?" + urlencode(params)

View File

@ -18,13 +18,13 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import sys
from .extensions import DEFAULT_EXTENSIONS
from .format import __available_formats__
__doc__ = _("""Usage: {0} [OPTION]... [REPOSITORY]...
__doc__ = _(
"""Usage: {0} [OPTION]... [REPOSITORY]...
List information about the repository in REPOSITORY. If no repository is
specified, the current directory is used. If multiple repositories are
given, information will be merged into a unified statistical report.
@ -76,7 +76,9 @@ add or remove one of the specified extensions, see -f or --file-types for
more information.
gitinspector requires that the git executable is available in your PATH.
Report gitinspector bugs to gitinspector@ejwa.se.""")
Report gitinspector bugs to gitinspector@ejwa.se."""
)
def output():
    """Print the usage text with the program name, default extensions and
    available output formats substituted in.

    Defect fixed: the body appeared twice (diff artifact); deduplicated.
    """
    print(__doc__.format(sys.argv[0], ",".join(DEFAULT_EXTENSIONS), ",".join(__available_formats__)))

View File

@ -18,11 +18,10 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
try:
from shlex import quote
from shlex import quote
except ImportError:
from pipes import quote
from pipes import quote
__since__ = ""
@ -30,26 +29,33 @@ __until__ = ""
__ref__ = "HEAD"
def has_interval():
    """Return True when a --since and/or --until restriction is active.

    Defect fixed in this group of accessors: every function was defined twice
    (pre/post formatting copies); deduplicated.
    """
    return __since__ + __until__ != ""


def get_since():
    """Return the stored "--since=..." argument ("" when unset)."""
    return __since__


def set_since(since):
    global __since__
    # quote() shell-escapes the value since it is passed to git on a command line.
    __since__ = "--since=" + quote(since)


def get_until():
    """Return the stored "--until=..." argument ("" when unset)."""
    return __until__


def set_until(until):
    global __until__
    __until__ = "--until=" + quote(until)


def get_ref():
    """Return the git ref analysed (defaults to "HEAD")."""
    return __ref__


def set_ref(ref):
    global __ref__
    __ref__ = ref

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import gettext
import locale
import os
@ -31,76 +30,84 @@ __enabled__ = False
__installed__ = False
__translation__ = None
#Dummy function used to handle string constants
# Dummy function used to handle string constants
def N_(message):
return message
return message
def init():
global __enabled__
global __installed__
global __translation__
global __enabled__
global __installed__
global __translation__
if not __installed__:
try:
locale.setlocale(locale.LC_ALL, "")
except locale.Error:
__translation__ = gettext.NullTranslations()
else:
lang = locale.getlocale()
if not __installed__:
try:
locale.setlocale(locale.LC_ALL, "")
except locale.Error:
__translation__ = gettext.NullTranslations()
else:
lang = locale.getlocale()
#Fix for non-POSIX-compliant systems (Windows et al.).
if os.getenv('LANG') is None:
lang = locale.getdefaultlocale()
# Fix for non-POSIX-compliant systems (Windows et al.).
if os.getenv("LANG") is None:
lang = locale.getdefaultlocale()
if lang[0]:
os.environ['LANG'] = lang[0]
if lang[0]:
os.environ["LANG"] = lang[0]
if lang[0] is not None:
filename = basedir.get_basedir() + "/translations/messages_%s.mo" % lang[0][0:2]
if lang[0] is not None:
filename = basedir.get_basedir() + "/translations/messages_%s.mo" % lang[0][0:2]
try:
__translation__ = gettext.GNUTranslations(open(filename, "rb"))
except IOError:
__translation__ = gettext.NullTranslations()
else:
print("WARNING: Localization disabled because the system language could not be determined.", file=sys.stderr)
__translation__ = gettext.NullTranslations()
try:
__translation__ = gettext.GNUTranslations(open(filename, "rb"))
except IOError:
__translation__ = gettext.NullTranslations()
else:
print("WARNING: Localization disabled because the system language could not be determined.", file=sys.stderr)
__translation__ = gettext.NullTranslations()
__enabled__ = True
__installed__ = True
__translation__.install()
__enabled__ = True
__installed__ = True
__translation__.install()
def check_compatibility(version):
if isinstance(__translation__, gettext.GNUTranslations):
header_pattern = re.compile("^([^:\n]+): *(.*?) *$", re.MULTILINE)
header_entries = dict(header_pattern.findall(_("")))
if isinstance(__translation__, gettext.GNUTranslations):
header_pattern = re.compile("^([^:\n]+): *(.*?) *$", re.MULTILINE)
header_entries = dict(header_pattern.findall(_("")))
if header_entries["Project-Id-Version"] != "gitinspector {0}".format(version):
print(
"WARNING: The translation for your system locale is not up to date with the current gitinspector "
"version. The current maintainer of this locale is {0}.".format(header_entries["Last-Translator"]),
file=sys.stderr,
)
if header_entries["Project-Id-Version"] != "gitinspector {0}".format(version):
print("WARNING: The translation for your system locale is not up to date with the current gitinspector "
"version. The current maintainer of this locale is {0}.".format(header_entries["Last-Translator"]),
file=sys.stderr)
def get_date():
if __enabled__ and isinstance(__translation__, gettext.GNUTranslations):
date = time.strftime("%x")
if __enabled__ and isinstance(__translation__, gettext.GNUTranslations):
date = time.strftime("%x")
if hasattr(date, 'decode'):
date = date.decode("utf-8", "replace")
if hasattr(date, "decode"):
date = date.decode("utf-8", "replace")
return date
else:
return time.strftime("%Y/%m/%d")
return date
else:
return time.strftime("%Y/%m/%d")
def enable():
if isinstance(__translation__, gettext.GNUTranslations):
__translation__.install(True)
if isinstance(__translation__, gettext.GNUTranslations):
__translation__.install(True)
global __enabled__
__enabled__ = True
global __enabled__
__enabled__ = True
def disable():
global __enabled__
__enabled__ = False
global __enabled__
__enabled__ = False
if __installed__:
gettext.NullTranslations().install()
if __installed__:
gettext.NullTranslations().install()

View File

@ -23,103 +23,137 @@ import subprocess
from .changes import FileDiff
from . import comment, filtering, interval
__metric_eloc__ = {"java": 500, "c": 500, "cpp": 500, "cs": 500, "h": 300, "hpp": 300, "php": 500, "py": 500, "glsl": 1000,
"rb": 500, "js": 500, "sql": 1000, "xml": 1000}
__metric_eloc__ = {
"java": 500,
"c": 500,
"cpp": 500,
"cs": 500,
"h": 300,
"hpp": 300,
"php": 500,
"py": 500,
"glsl": 1000,
"rb": 500,
"js": 500,
"sql": 1000,
"xml": 1000,
}
__metric_cc_tokens__ = [[["java", "js", "c", "cc", "cpp"], ["else", r"for\s+\(.*\)", r"if\s+\(.*\)", r"case\s+\w+:",
"default:", r"while\s+\(.*\)"],
["assert", "break", "continue", "return"]],
[["cs"], ["else", r"for\s+\(.*\)", r"foreach\s+\(.*\)", r"goto\s+\w+:", r"if\s+\(.*\)", r"case\s+\w+:",
"default:", r"while\s+\(.*\)"],
["assert", "break", "continue", "return"]],
[["py"], [r"^\s+elif .*:$", r"^\s+else:$", r"^\s+for .*:", r"^\s+if .*:$", r"^\s+while .*:$"],
[r"^\s+assert", "break", "continue", "return"]]]
__metric_cc_tokens__ = [
[
["java", "js", "c", "cc", "cpp"],
["else", r"for\s+\(.*\)", r"if\s+\(.*\)", r"case\s+\w+:", "default:", r"while\s+\(.*\)"],
["assert", "break", "continue", "return"],
],
[
["cs"],
[
"else",
r"for\s+\(.*\)",
r"foreach\s+\(.*\)",
r"goto\s+\w+:",
r"if\s+\(.*\)",
r"case\s+\w+:",
"default:",
r"while\s+\(.*\)",
],
["assert", "break", "continue", "return"],
],
[
["py"],
[r"^\s+elif .*:$", r"^\s+else:$", r"^\s+for .*:", r"^\s+if .*:$", r"^\s+while .*:$"],
[r"^\s+assert", "break", "continue", "return"],
],
]
METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD = 50
METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD = 0.75
class MetricsLogic(object):
def __init__(self):
self.eloc = {}
self.cyclomatic_complexity = {}
self.cyclomatic_complexity_density = {}
def __init__(self):
self.eloc = {}
self.cyclomatic_complexity = {}
self.cyclomatic_complexity_density = {}
ls_tree_p = subprocess.Popen(["git", "ls-tree", "--name-only", "-r", interval.get_ref()],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
lines = ls_tree_p.communicate()[0].splitlines()
ls_tree_p.stdout.close()
ls_tree_p = subprocess.Popen(
["git", "ls-tree", "--name-only", "-r", interval.get_ref()], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
lines = ls_tree_p.communicate()[0].splitlines()
ls_tree_p.stdout.close()
if ls_tree_p.returncode == 0:
for i in lines:
i = i.strip().decode("unicode_escape", "ignore")
i = i.encode("latin-1", "replace")
i = i.decode("utf-8", "replace").strip("\"").strip("'").strip()
if ls_tree_p.returncode == 0:
for i in lines:
i = i.strip().decode("unicode_escape", "ignore")
i = i.encode("latin-1", "replace")
i = i.decode("utf-8", "replace").strip('"').strip("'").strip()
if FileDiff.is_valid_extension(i) and not filtering.set_filtered(FileDiff.get_filename(i)):
file_r = subprocess.Popen(["git", "show", interval.get_ref() + ":{0}".format(i.strip())],
stdout=subprocess.PIPE).stdout.readlines()
if FileDiff.is_valid_extension(i) and not filtering.set_filtered(FileDiff.get_filename(i)):
file_r = subprocess.Popen(
["git", "show", interval.get_ref() + ":{0}".format(i.strip())], stdout=subprocess.PIPE
).stdout.readlines()
extension = FileDiff.get_extension(i)
lines = MetricsLogic.get_eloc(file_r, extension)
cycc = MetricsLogic.get_cyclomatic_complexity(file_r, extension)
extension = FileDiff.get_extension(i)
lines = MetricsLogic.get_eloc(file_r, extension)
cycc = MetricsLogic.get_cyclomatic_complexity(file_r, extension)
if __metric_eloc__.get(extension, None) != None and __metric_eloc__[extension] < lines:
self.eloc[i.strip()] = lines
if __metric_eloc__.get(extension, None) is not None and __metric_eloc__[extension] < lines:
self.eloc[i.strip()] = lines
if METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD < cycc:
self.cyclomatic_complexity[i.strip()] = cycc
if METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD < cycc:
self.cyclomatic_complexity[i.strip()] = cycc
if lines > 0 and METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD < cycc / float(lines):
self.cyclomatic_complexity_density[i.strip()] = cycc / float(lines)
if lines > 0 and METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD < cycc / float(lines):
self.cyclomatic_complexity_density[i.strip()] = cycc / float(lines)
def __iadd__(self, other):
try:
self.eloc.update(other.eloc)
self.cyclomatic_complexity.update(other.cyclomatic_complexity)
self.cyclomatic_complexity_density.update(other.cyclomatic_complexity_density)
return self
except AttributeError:
return other;
def __iadd__(self, other):
try:
self.eloc.update(other.eloc)
self.cyclomatic_complexity.update(other.cyclomatic_complexity)
self.cyclomatic_complexity_density.update(other.cyclomatic_complexity_density)
return self
except AttributeError:
return other
@staticmethod
def get_cyclomatic_complexity(file_r, extension):
is_inside_comment = False
cc_counter = 0
@staticmethod
def get_cyclomatic_complexity(file_r, extension):
is_inside_comment = False
cc_counter = 0
entry_tokens = None
exit_tokens = None
entry_tokens = None
exit_tokens = None
for i in __metric_cc_tokens__:
if extension in i[0]:
entry_tokens = i[1]
exit_tokens = i[2]
for i in __metric_cc_tokens__:
if extension in i[0]:
entry_tokens = i[1]
exit_tokens = i[2]
if entry_tokens or exit_tokens:
for i in file_r:
i = i.decode("utf-8", "replace")
(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)
if entry_tokens or exit_tokens:
for i in file_r:
i = i.decode("utf-8", "replace")
(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)
if not is_inside_comment and not comment.is_comment(extension, i):
for j in entry_tokens:
if re.search(j, i, re.DOTALL):
cc_counter += 2
for j in exit_tokens:
if re.search(j, i, re.DOTALL):
cc_counter += 1
return cc_counter
if not is_inside_comment and not comment.is_comment(extension, i):
for j in entry_tokens:
if re.search(j, i, re.DOTALL):
cc_counter += 2
for j in exit_tokens:
if re.search(j, i, re.DOTALL):
cc_counter += 1
return cc_counter
return -1
return -1
@staticmethod
def get_eloc(file_r, extension):
is_inside_comment = False
eloc_counter = 0
@staticmethod
def get_eloc(file_r, extension):
is_inside_comment = False
eloc_counter = 0
for i in file_r:
i = i.decode("utf-8", "replace")
(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)
for i in file_r:
i = i.decode("utf-8", "replace")
(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)
if not is_inside_comment and not comment.is_comment(extension, i):
eloc_counter += 1
if not is_inside_comment and not comment.is_comment(extension, i):
eloc_counter += 1
return eloc_counter
return eloc_counter

View File

@ -20,47 +20,53 @@
import getopt
class InvalidOptionArgument(Exception):
def __init__(self, msg):
super(InvalidOptionArgument, self).__init__(msg)
self.msg = msg
def __init__(self, msg):
super(InvalidOptionArgument, self).__init__(msg)
self.msg = msg
def __find_arg_in_options__(arg, options):
for opt in options:
if opt[0].find(arg) == 0:
return opt
for opt in options:
if opt[0].find(arg) == 0:
return opt
return None
return None
def __find_options_to_extend__(long_options):
options_to_extend = []
options_to_extend = []
for num, arg in enumerate(long_options):
arg = arg.split(":")
if len(arg) == 2:
long_options[num] = arg[0] + "="
options_to_extend.append(("--" + arg[0], arg[1]))
for num, arg in enumerate(long_options):
arg = arg.split(":")
if len(arg) == 2:
long_options[num] = arg[0] + "="
options_to_extend.append(("--" + arg[0], arg[1]))
return options_to_extend
return options_to_extend
# This is a duplicate of gnu_getopt, but with support for optional arguments in long options, in the form; "arg:default_value".
def gnu_getopt(args, options, long_options):
options_to_extend = __find_options_to_extend__(long_options)
options_to_extend = __find_options_to_extend__(long_options)
for num, arg in enumerate(args):
opt = __find_arg_in_options__(arg, options_to_extend)
if opt:
args[num] = arg + "=" + opt[1]
for num, arg in enumerate(args):
opt = __find_arg_in_options__(arg, options_to_extend)
if opt:
args[num] = arg + "=" + opt[1]
return getopt.gnu_getopt(args, options, long_options)
return getopt.gnu_getopt(args, options, long_options)
def get_boolean_argument(arg):
if isinstance(arg, bool):
return arg
elif arg == None or arg.lower() == "false" or arg.lower() == "f" or arg == "0":
return False
elif arg.lower() == "true" or arg.lower() == "t" or arg == "1":
return True
if isinstance(arg, bool):
return arg
elif arg is None or arg.lower() == "false" or arg.lower() == "f" or arg == "0":
return False
elif arg.lower() == "true" or arg.lower() == "t" or arg == "1":
return True
raise InvalidOptionArgument(_("The given option argument is not a valid boolean."))
raise InvalidOptionArgument(_("The given option argument is not a valid boolean."))

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import json
import sys
import textwrap
@ -27,128 +26,160 @@ from .. import format, gravatar, terminal
from ..blame import Blame
from .outputable import Outputable
BLAME_INFO_TEXT = N_("Below are the number of rows from each author that have survived and are still "
"intact in the current revision")
BLAME_INFO_TEXT = N_(
"Below are the number of rows from each author that have survived and are still " "intact in the current revision"
)
class BlameOutput(Outputable):
def __init__(self, changes, blame):
if format.is_interactive_format():
print("")
def __init__(self, changes, blame):
if format.is_interactive_format():
print("")
self.changes = changes
self.blame = blame
Outputable.__init__(self)
self.changes = changes
self.blame = blame
Outputable.__init__(self)
def output_html(self):
blame_xml = "<div><div class=\"box\">"
blame_xml += "<p>" + _(BLAME_INFO_TEXT) + ".</p><div><table id=\"blame\" class=\"git\">"
blame_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th> </tr></thead>".format(
_("Author"), _("Rows"), _("Stability"), _("Age"), _("% in comments"))
blame_xml += "<tbody>"
chart_data = ""
blames = sorted(self.blame.get_summed_blames().items())
total_blames = 0
def output_html(self):
blame_xml = '<div><div class="box">'
blame_xml += "<p>" + _(BLAME_INFO_TEXT) + '.</p><div><table id="blame" class="git">'
blame_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th> </tr></thead>".format(
_("Author"), _("Rows"), _("Stability"), _("Age"), _("% in comments")
)
blame_xml += "<tbody>"
chart_data = ""
blames = sorted(self.blame.get_summed_blames().items())
total_blames = 0
for i in blames:
total_blames += i[1].rows
for i in blames:
total_blames += i[1].rows
for i, entry in enumerate(blames):
work_percentage = str("{0:.2f}".format(100.0 * entry[1].rows / total_blames))
blame_xml += "<tr " + ("class=\"odd\">" if i % 2 == 1 else ">")
for i, entry in enumerate(blames):
work_percentage = str("{0:.2f}".format(100.0 * entry[1].rows / total_blames))
blame_xml += "<tr " + ('class="odd">' if i % 2 == 1 else ">")
if format.get_selected() == "html":
author_email = self.changes.get_latest_email_by_author(entry[0])
blame_xml += "<td><img src=\"{0}\"/>{1}</td>".format(gravatar.get_url(author_email), entry[0])
else:
blame_xml += "<td>" + entry[0] + "</td>"
if format.get_selected() == "html":
author_email = self.changes.get_latest_email_by_author(entry[0])
blame_xml += '<td><img src="{0}"/>{1}</td>'.format(gravatar.get_url(author_email), entry[0])
else:
blame_xml += "<td>" + entry[0] + "</td>"
blame_xml += "<td>" + str(entry[1].rows) + "</td>"
blame_xml += "<td>" + ("{0:.1f}".format(Blame.get_stability(entry[0], entry[1].rows, self.changes)) + "</td>")
blame_xml += "<td>" + "{0:.1f}".format(float(entry[1].skew) / entry[1].rows) + "</td>"
blame_xml += "<td>" + "{0:.2f}".format(100.0 * entry[1].comments / entry[1].rows) + "</td>"
blame_xml += "<td style=\"display: none\">" + work_percentage + "</td>"
blame_xml += "</tr>"
chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry[0]), work_percentage)
blame_xml += "<td>" + str(entry[1].rows) + "</td>"
blame_xml += "<td>" + ("{0:.1f}".format(Blame.get_stability(entry[0], entry[1].rows, self.changes)) + "</td>")
blame_xml += "<td>" + "{0:.1f}".format(float(entry[1].skew) / entry[1].rows) + "</td>"
blame_xml += "<td>" + "{0:.2f}".format(100.0 * entry[1].comments / entry[1].rows) + "</td>"
blame_xml += '<td style="display: none">' + work_percentage + "</td>"
blame_xml += "</tr>"
chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry[0]), work_percentage)
if blames[-1] != entry:
chart_data += ", "
if blames[-1] != entry:
chart_data += ", "
blame_xml += "<tfoot><tr> <td colspan=\"5\">&nbsp;</td> </tr></tfoot></tbody></table>"
blame_xml += "<div class=\"chart\" id=\"blame_chart\"></div></div>"
blame_xml += "<script type=\"text/javascript\">"
blame_xml += " blame_plot = $.plot($(\"#blame_chart\"), [{0}], {{".format(chart_data)
blame_xml += " series: {"
blame_xml += " pie: {"
blame_xml += " innerRadius: 0.4,"
blame_xml += " show: true,"
blame_xml += " combine: {"
blame_xml += " threshold: 0.01,"
blame_xml += " label: \"" + _("Minor Authors") + "\""
blame_xml += " }"
blame_xml += " }"
blame_xml += " }, grid: {"
blame_xml += " hoverable: true"
blame_xml += " }"
blame_xml += " });"
blame_xml += "</script></div></div>"
blame_xml += '<tfoot><tr> <td colspan="5">&nbsp;</td> </tr></tfoot></tbody></table>'
blame_xml += '<div class="chart" id="blame_chart"></div></div>'
blame_xml += '<script type="text/javascript">'
blame_xml += ' blame_plot = $.plot($("#blame_chart"), [{0}], {{'.format(chart_data)
blame_xml += " series: {"
blame_xml += " pie: {"
blame_xml += " innerRadius: 0.4,"
blame_xml += " show: true,"
blame_xml += " combine: {"
blame_xml += " threshold: 0.01,"
blame_xml += ' label: "' + _("Minor Authors") + '"'
blame_xml += " }"
blame_xml += " }"
blame_xml += " }, grid: {"
blame_xml += " hoverable: true"
blame_xml += " }"
blame_xml += " });"
blame_xml += "</script></div></div>"
print(blame_xml)
print(blame_xml)
def output_json(self):
message_json = "\t\t\t\"message\": \"" + _(BLAME_INFO_TEXT) + "\",\n"
blame_json = ""
def output_json(self):
message_json = '\t\t\t"message": "' + _(BLAME_INFO_TEXT) + '",\n'
blame_json = ""
for i in sorted(self.blame.get_summed_blames().items()):
author_email = self.changes.get_latest_email_by_author(i[0])
for i in sorted(self.blame.get_summed_blames().items()):
author_email = self.changes.get_latest_email_by_author(i[0])
name_json = "\t\t\t\t\"name\": \"" + i[0] + "\",\n"
email_json = "\t\t\t\t\"email\": \"" + author_email + "\",\n"
gravatar_json = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
rows_json = "\t\t\t\t\"rows\": " + str(i[1].rows) + ",\n"
stability_json = ("\t\t\t\t\"stability\": " + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows,
self.changes)) + ",\n")
age_json = ("\t\t\t\t\"age\": " + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + ",\n")
percentage_in_comments_json = ("\t\t\t\t\"percentage_in_comments\": " +
"{0:.2f}".format(100.0 * i[1].comments / i[1].rows) + "\n")
blame_json += ("{\n" + name_json + email_json + gravatar_json + rows_json + stability_json + age_json +
percentage_in_comments_json + "\t\t\t},")
else:
blame_json = blame_json[:-1]
name_json = '\t\t\t\t"name": "' + i[0] + '",\n'
email_json = '\t\t\t\t"email": "' + author_email + '",\n'
gravatar_json = '\t\t\t\t"gravatar": "' + gravatar.get_url(author_email) + '",\n'
rows_json = '\t\t\t\t"rows": ' + str(i[1].rows) + ",\n"
stability_json = (
'\t\t\t\t"stability": ' + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)) + ",\n"
)
age_json = '\t\t\t\t"age": ' + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + ",\n"
percentage_in_comments_json = (
'\t\t\t\t"percentage_in_comments": ' + "{0:.2f}".format(100.0 * i[1].comments / i[1].rows) + "\n"
)
blame_json += (
"{\n"
+ name_json
+ email_json
+ gravatar_json
+ rows_json
+ stability_json
+ age_json
+ percentage_in_comments_json
+ "\t\t\t},"
)
else:
blame_json = blame_json[:-1]
print(",\n\t\t\"blame\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + blame_json + "]\n\t\t}", end="")
print(',\n\t\t"blame": {\n' + message_json + '\t\t\t"authors": [\n\t\t\t' + blame_json + "]\n\t\t}", end="")
def output_text(self):
if sys.stdout.isatty() and format.is_interactive_format():
terminal.clear_row()
def output_text(self):
if sys.stdout.isatty() and format.is_interactive_format():
terminal.clear_row()
print(textwrap.fill(_(BLAME_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n")
terminal.printb(terminal.ljust(_("Author"), 21) + terminal.rjust(_("Rows"), 10) + terminal.rjust(_("Stability"), 15) +
terminal.rjust(_("Age"), 13) + terminal.rjust(_("% in comments"), 20))
print(textwrap.fill(_(BLAME_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n")
terminal.printb(
terminal.ljust(_("Author"), 21)
+ terminal.rjust(_("Rows"), 10)
+ terminal.rjust(_("Stability"), 15)
+ terminal.rjust(_("Age"), 13)
+ terminal.rjust(_("% in comments"), 20)
)
for i in sorted(self.blame.get_summed_blames().items()):
print(terminal.ljust(i[0], 20)[0:20 - terminal.get_excess_column_count(i[0])], end=" ")
print(str(i[1].rows).rjust(10), end=" ")
print("{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)).rjust(14), end=" ")
print("{0:.1f}".format(float(i[1].skew) / i[1].rows).rjust(12), end=" ")
print("{0:.2f}".format(100.0 * i[1].comments / i[1].rows).rjust(19))
for i in sorted(self.blame.get_summed_blames().items()):
print(terminal.ljust(i[0], 20)[0:20 - terminal.get_excess_column_count(i[0])], end=" ")
print(str(i[1].rows).rjust(10), end=" ")
print("{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)).rjust(14), end=" ")
print("{0:.1f}".format(float(i[1].skew) / i[1].rows).rjust(12), end=" ")
print("{0:.2f}".format(100.0 * i[1].comments / i[1].rows).rjust(19))
def output_xml(self):
message_xml = "\t\t<message>" + _(BLAME_INFO_TEXT) + "</message>\n"
blame_xml = ""
def output_xml(self):
message_xml = "\t\t<message>" + _(BLAME_INFO_TEXT) + "</message>\n"
blame_xml = ""
for i in sorted(self.blame.get_summed_blames().items()):
author_email = self.changes.get_latest_email_by_author(i[0])
for i in sorted(self.blame.get_summed_blames().items()):
author_email = self.changes.get_latest_email_by_author(i[0])
name_xml = "\t\t\t\t<name>" + i[0] + "</name>\n"
email_xml = "\t\t\t\t<email>" + author_email + "</email>\n"
gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
rows_xml = "\t\t\t\t<rows>" + str(i[1].rows) + "</rows>\n"
stability_xml = ("\t\t\t\t<stability>" + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows,
self.changes)) + "</stability>\n")
age_xml = ("\t\t\t\t<age>" + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + "</age>\n")
percentage_in_comments_xml = ("\t\t\t\t<percentage-in-comments>" + "{0:.2f}".format(100.0 * i[1].comments / i[1].rows) +
"</percentage-in-comments>\n")
blame_xml += ("\t\t\t<author>\n" + name_xml + email_xml + gravatar_xml + rows_xml + stability_xml +
age_xml + percentage_in_comments_xml + "\t\t\t</author>\n")
name_xml = "\t\t\t\t<name>" + i[0] + "</name>\n"
email_xml = "\t\t\t\t<email>" + author_email + "</email>\n"
gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
rows_xml = "\t\t\t\t<rows>" + str(i[1].rows) + "</rows>\n"
stability_xml = (
"\t\t\t\t<stability>" + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)) + "</stability>\n"
)
age_xml = "\t\t\t\t<age>" + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + "</age>\n"
percentage_in_comments_xml = (
"\t\t\t\t<percentage-in-comments>"
+ "{0:.2f}".format(100.0 * i[1].comments / i[1].rows)
+ "</percentage-in-comments>\n"
)
blame_xml += (
"\t\t\t<author>\n"
+ name_xml
+ email_xml
+ gravatar_xml
+ rows_xml
+ stability_xml
+ age_xml
+ percentage_in_comments_xml
+ "\t\t\t</author>\n"
)
print("\t<blame>\n" + message_xml + "\t\t<authors>\n" + blame_xml + "\t\t</authors>\n\t</blame>")
print("\t<blame>\n" + message_xml + "\t\t<authors>\n" + blame_xml + "\t\t</authors>\n\t</blame>")

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import json
import textwrap
from ..localization import N_
@ -28,162 +27,189 @@ from .outputable import Outputable
HISTORICAL_INFO_TEXT = N_("The following historical commit information, by author, was found")
NO_COMMITED_FILES_TEXT = N_("No commited files with the specified extensions were found")
class ChangesOutput(Outputable):
def __init__(self, changes):
self.changes = changes
Outputable.__init__(self)
def __init__(self, changes):
self.changes = changes
Outputable.__init__(self)
def output_html(self):
authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0
changes_xml = "<div><div class=\"box\">"
chart_data = ""
def output_html(self):
authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0
changes_xml = '<div><div class="box">'
chart_data = ""
for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions
for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions
if authorinfo_list:
changes_xml += "<p>" + _(HISTORICAL_INFO_TEXT) + ".</p><div><table id=\"changes\" class=\"git\">"
changes_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th>".format(
_("Author"), _("Commits"), _("Insertions"), _("Deletions"), _("% of changes"))
changes_xml += "</tr></thead><tbody>"
if authorinfo_list:
changes_xml += "<p>" + _(HISTORICAL_INFO_TEXT) + '.</p><div><table id="changes" class="git">'
changes_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th>".format(
_("Author"), _("Commits"), _("Insertions"), _("Deletions"), _("% of changes")
)
changes_xml += "</tr></thead><tbody>"
for i, entry in enumerate(sorted(authorinfo_list)):
authorinfo = authorinfo_list.get(entry)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
for i, entry in enumerate(sorted(authorinfo_list)):
authorinfo = authorinfo_list.get(entry)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
changes_xml += "<tr " + ("class=\"odd\">" if i % 2 == 1 else ">")
changes_xml += "<tr " + ('class="odd">' if i % 2 == 1 else ">")
if format.get_selected() == "html":
changes_xml += "<td><img src=\"{0}\"/>{1}</td>".format(
gravatar.get_url(self.changes.get_latest_email_by_author(entry)), entry)
else:
changes_xml += "<td>" + entry + "</td>"
if format.get_selected() == "html":
changes_xml += '<td><img src="{0}"/>{1}</td>'.format(
gravatar.get_url(self.changes.get_latest_email_by_author(entry)), entry
)
else:
changes_xml += "<td>" + entry + "</td>"
changes_xml += "<td>" + str(authorinfo.commits) + "</td>"
changes_xml += "<td>" + str(authorinfo.insertions) + "</td>"
changes_xml += "<td>" + str(authorinfo.deletions) + "</td>"
changes_xml += "<td>" + "{0:.2f}".format(percentage) + "</td>"
changes_xml += "</tr>"
chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry), "{0:.2f}".format(percentage))
changes_xml += "<td>" + str(authorinfo.commits) + "</td>"
changes_xml += "<td>" + str(authorinfo.insertions) + "</td>"
changes_xml += "<td>" + str(authorinfo.deletions) + "</td>"
changes_xml += "<td>" + "{0:.2f}".format(percentage) + "</td>"
changes_xml += "</tr>"
chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry), "{0:.2f}".format(percentage))
if sorted(authorinfo_list)[-1] != entry:
chart_data += ", "
if sorted(authorinfo_list)[-1] != entry:
chart_data += ", "
changes_xml += ("<tfoot><tr> <td colspan=\"5\">&nbsp;</td> </tr></tfoot></tbody></table>")
changes_xml += "<div class=\"chart\" id=\"changes_chart\"></div></div>"
changes_xml += "<script type=\"text/javascript\">"
changes_xml += " changes_plot = $.plot($(\"#changes_chart\"), [{0}], {{".format(chart_data)
changes_xml += " series: {"
changes_xml += " pie: {"
changes_xml += " innerRadius: 0.4,"
changes_xml += " show: true,"
changes_xml += " combine: {"
changes_xml += " threshold: 0.01,"
changes_xml += " label: \"" + _("Minor Authors") + "\""
changes_xml += " }"
changes_xml += " }"
changes_xml += " }, grid: {"
changes_xml += " hoverable: true"
changes_xml += " }"
changes_xml += " });"
changes_xml += "</script>"
else:
changes_xml += "<p>" + _(NO_COMMITED_FILES_TEXT) + ".</p>"
changes_xml += '<tfoot><tr> <td colspan="5">&nbsp;</td> </tr></tfoot></tbody></table>'
changes_xml += '<div class="chart" id="changes_chart"></div></div>'
changes_xml += '<script type="text/javascript">'
changes_xml += ' changes_plot = $.plot($("#changes_chart"), [{0}], {{'.format(chart_data)
changes_xml += " series: {"
changes_xml += " pie: {"
changes_xml += " innerRadius: 0.4,"
changes_xml += " show: true,"
changes_xml += " combine: {"
changes_xml += " threshold: 0.01,"
changes_xml += ' label: "' + _("Minor Authors") + '"'
changes_xml += " }"
changes_xml += " }"
changes_xml += " }, grid: {"
changes_xml += " hoverable: true"
changes_xml += " }"
changes_xml += " });"
changes_xml += "</script>"
else:
changes_xml += "<p>" + _(NO_COMMITED_FILES_TEXT) + ".</p>"
changes_xml += "</div></div>"
print(changes_xml)
changes_xml += "</div></div>"
print(changes_xml)
def output_json(self):
authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0
def output_json(self):
authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0
for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions
for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions
if authorinfo_list:
message_json = "\t\t\t\"message\": \"" + _(HISTORICAL_INFO_TEXT) + "\",\n"
changes_json = ""
if authorinfo_list:
message_json = '\t\t\t"message": "' + _(HISTORICAL_INFO_TEXT) + '",\n'
changes_json = ""
for i in sorted(authorinfo_list):
author_email = self.changes.get_latest_email_by_author(i)
authorinfo = authorinfo_list.get(i)
for i in sorted(authorinfo_list):
author_email = self.changes.get_latest_email_by_author(i)
authorinfo = authorinfo_list.get(i)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
name_json = "\t\t\t\t\"name\": \"" + i + "\",\n"
email_json = "\t\t\t\t\"email\": \"" + author_email + "\",\n"
gravatar_json = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
commits_json = "\t\t\t\t\"commits\": " + str(authorinfo.commits) + ",\n"
insertions_json = "\t\t\t\t\"insertions\": " + str(authorinfo.insertions) + ",\n"
deletions_json = "\t\t\t\t\"deletions\": " + str(authorinfo.deletions) + ",\n"
percentage_json = "\t\t\t\t\"percentage_of_changes\": " + "{0:.2f}".format(percentage) + "\n"
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
name_json = '\t\t\t\t"name": "' + i + '",\n'
email_json = '\t\t\t\t"email": "' + author_email + '",\n'
gravatar_json = '\t\t\t\t"gravatar": "' + gravatar.get_url(author_email) + '",\n'
commits_json = '\t\t\t\t"commits": ' + str(authorinfo.commits) + ",\n"
insertions_json = '\t\t\t\t"insertions": ' + str(authorinfo.insertions) + ",\n"
deletions_json = '\t\t\t\t"deletions": ' + str(authorinfo.deletions) + ",\n"
percentage_json = '\t\t\t\t"percentage_of_changes": ' + "{0:.2f}".format(percentage) + "\n"
changes_json += ("{\n" + name_json + email_json + gravatar_json + commits_json +
insertions_json + deletions_json + percentage_json + "\t\t\t}")
changes_json += ","
else:
changes_json = changes_json[:-1]
changes_json += (
"{\n"
+ name_json
+ email_json
+ gravatar_json
+ commits_json
+ insertions_json
+ deletions_json
+ percentage_json
+ "\t\t\t}"
)
changes_json += ","
else:
changes_json = changes_json[:-1]
print("\t\t\"changes\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + changes_json + "]\n\t\t}", end="")
else:
print("\t\t\"exception\": \"" + _(NO_COMMITED_FILES_TEXT) + "\"")
print('\t\t"changes": {\n' + message_json + '\t\t\t"authors": [\n\t\t\t' + changes_json + "]\n\t\t}", end="")
else:
print('\t\t"exception": "' + _(NO_COMMITED_FILES_TEXT) + '"')
def output_text(self):
authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0
def output_text(self):
authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0
for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions
for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions
if authorinfo_list:
print(textwrap.fill(_(HISTORICAL_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n")
terminal.printb(terminal.ljust(_("Author"), 21) + terminal.rjust(_("Commits"), 13) +
terminal.rjust(_("Insertions"), 14) + terminal.rjust(_("Deletions"), 15) +
terminal.rjust(_("% of changes"), 16))
if authorinfo_list:
print(textwrap.fill(_(HISTORICAL_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n")
terminal.printb(
terminal.ljust(_("Author"), 21)
+ terminal.rjust(_("Commits"), 13)
+ terminal.rjust(_("Insertions"), 14)
+ terminal.rjust(_("Deletions"), 15)
+ terminal.rjust(_("% of changes"), 16)
)
for i in sorted(authorinfo_list):
authorinfo = authorinfo_list.get(i)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
for i in sorted(authorinfo_list):
authorinfo = authorinfo_list.get(i)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
print(terminal.ljust(i, 20)[0:20 - terminal.get_excess_column_count(i)], end=" ")
print(str(authorinfo.commits).rjust(13), end=" ")
print(str(authorinfo.insertions).rjust(13), end=" ")
print(str(authorinfo.deletions).rjust(14), end=" ")
print("{0:.2f}".format(percentage).rjust(15))
else:
print(_(NO_COMMITED_FILES_TEXT) + ".")
print(terminal.ljust(i, 20)[0:20 - terminal.get_excess_column_count(i)], end=" ")
print(str(authorinfo.commits).rjust(13), end=" ")
print(str(authorinfo.insertions).rjust(13), end=" ")
print(str(authorinfo.deletions).rjust(14), end=" ")
print("{0:.2f}".format(percentage).rjust(15))
else:
print(_(NO_COMMITED_FILES_TEXT) + ".")
def output_xml(self):
authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0
def output_xml(self):
authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0
for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions
for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions
if authorinfo_list:
message_xml = "\t\t<message>" + _(HISTORICAL_INFO_TEXT) + "</message>\n"
changes_xml = ""
if authorinfo_list:
message_xml = "\t\t<message>" + _(HISTORICAL_INFO_TEXT) + "</message>\n"
changes_xml = ""
for i in sorted(authorinfo_list):
author_email = self.changes.get_latest_email_by_author(i)
authorinfo = authorinfo_list.get(i)
for i in sorted(authorinfo_list):
author_email = self.changes.get_latest_email_by_author(i)
authorinfo = authorinfo_list.get(i)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
name_xml = "\t\t\t\t<name>" + i + "</name>\n"
email_xml = "\t\t\t\t<email>" + author_email + "</email>\n"
gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
commits_xml = "\t\t\t\t<commits>" + str(authorinfo.commits) + "</commits>\n"
insertions_xml = "\t\t\t\t<insertions>" + str(authorinfo.insertions) + "</insertions>\n"
deletions_xml = "\t\t\t\t<deletions>" + str(authorinfo.deletions) + "</deletions>\n"
percentage_xml = "\t\t\t\t<percentage-of-changes>" + "{0:.2f}".format(percentage) + "</percentage-of-changes>\n"
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
name_xml = "\t\t\t\t<name>" + i + "</name>\n"
email_xml = "\t\t\t\t<email>" + author_email + "</email>\n"
gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
commits_xml = "\t\t\t\t<commits>" + str(authorinfo.commits) + "</commits>\n"
insertions_xml = "\t\t\t\t<insertions>" + str(authorinfo.insertions) + "</insertions>\n"
deletions_xml = "\t\t\t\t<deletions>" + str(authorinfo.deletions) + "</deletions>\n"
percentage_xml = (
"\t\t\t\t<percentage-of-changes>" + "{0:.2f}".format(percentage) + "</percentage-of-changes>\n"
)
changes_xml += ("\t\t\t<author>\n" + name_xml + email_xml + gravatar_xml + commits_xml +
insertions_xml + deletions_xml + percentage_xml + "\t\t\t</author>\n")
changes_xml += (
"\t\t\t<author>\n"
+ name_xml
+ email_xml
+ gravatar_xml
+ commits_xml
+ insertions_xml
+ deletions_xml
+ percentage_xml
+ "\t\t\t</author>\n"
)
print("\t<changes>\n" + message_xml + "\t\t<authors>\n" + changes_xml + "\t\t</authors>\n\t</changes>")
else:
print("\t<changes>\n\t\t<exception>" + _(NO_COMMITED_FILES_TEXT) + "</exception>\n\t</changes>")
print("\t<changes>\n" + message_xml + "\t\t<authors>\n" + changes_xml + "\t\t</authors>\n\t</changes>")
else:
print("\t<changes>\n\t\t<exception>" + _(NO_COMMITED_FILES_TEXT) + "</exception>\n\t</changes>")

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import textwrap
from ..localization import N_
from .. import extensions, terminal
@ -28,70 +27,93 @@ from .outputable import Outputable
EXTENSIONS_INFO_TEXT = N_("The extensions below were found in the repository history")
EXTENSIONS_MARKED_TEXT = N_("(extensions used during statistical analysis are marked)")
class ExtensionsOutput(Outputable):
@staticmethod
def is_marked(extension):
if extension in extensions.__extensions__ or "**" in extensions.__extensions__:
return True
@staticmethod
def is_marked(extension):
if extension in extensions.__extensions__ or "**" in extensions.__extensions__:
return True
return False
return False
def output_html(self):
if extensions.__located_extensions__:
extensions_xml = "<div><div class=\"box\">"
extensions_xml += "<p>{0} {1}.</p><p>".format(_(EXTENSIONS_INFO_TEXT), _(EXTENSIONS_MARKED_TEXT))
def output_html(self):
if extensions.__located_extensions__:
extensions_xml = '<div><div class="box">'
extensions_xml += "<p>{0} {1}.</p><p>".format(_(EXTENSIONS_INFO_TEXT), _(EXTENSIONS_MARKED_TEXT))
for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i):
extensions_xml += "<strong>" + i + "</strong>"
else:
extensions_xml += i
extensions_xml += " "
for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i):
extensions_xml += "<strong>" + i + "</strong>"
else:
extensions_xml += i
extensions_xml += " "
extensions_xml += "</p></div></div>"
print(extensions_xml)
extensions_xml += "</p></div></div>"
print(extensions_xml)
def output_json(self):
if extensions.__located_extensions__:
message_json = "\t\t\t\"message\": \"" + _(EXTENSIONS_INFO_TEXT) + "\",\n"
used_extensions_json = ""
unused_extensions_json = ""
def output_json(self):
if extensions.__located_extensions__:
message_json = '\t\t\t"message": "' + _(EXTENSIONS_INFO_TEXT) + '",\n'
used_extensions_json = ""
unused_extensions_json = ""
for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i):
used_extensions_json += "\"" + i + "\", "
else:
unused_extensions_json += "\"" + i + "\", "
for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i):
used_extensions_json += '"' + i + '", '
else:
unused_extensions_json += '"' + i + '", '
used_extensions_json = used_extensions_json[:-2]
unused_extensions_json = unused_extensions_json[:-2]
used_extensions_json = used_extensions_json[:-2]
unused_extensions_json = unused_extensions_json[:-2]
print(",\n\t\t\"extensions\": {\n" + message_json + "\t\t\t\"used\": [ " + used_extensions_json +
" ],\n\t\t\t\"unused\": [ " + unused_extensions_json + " ]\n" + "\t\t}", end="")
print(
',\n\t\t"extensions": {\n'
+ message_json
+ '\t\t\t"used": [ '
+ used_extensions_json
+ ' ],\n\t\t\t"unused": [ '
+ unused_extensions_json
+ " ]\n"
+ "\t\t}",
end="",
)
def output_text(self):
if extensions.__located_extensions__:
print("\n" + textwrap.fill("{0} {1}:".format(_(EXTENSIONS_INFO_TEXT), _(EXTENSIONS_MARKED_TEXT)),
width=terminal.get_size()[0]))
def output_text(self):
if extensions.__located_extensions__:
print(
"\n"
+ textwrap.fill(
"{0} {1}:".format(_(EXTENSIONS_INFO_TEXT), _(EXTENSIONS_MARKED_TEXT)), width=terminal.get_size()[0]
)
)
for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i):
print("[" + terminal.__bold__ + i + terminal.__normal__ + "]", end=" ")
else:
print (i, end=" ")
print("")
for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i):
print("[" + terminal.__bold__ + i + terminal.__normal__ + "]", end=" ")
else:
print(i, end=" ")
print("")
def output_xml(self):
if extensions.__located_extensions__:
message_xml = "\t\t<message>" + _(EXTENSIONS_INFO_TEXT) + "</message>\n"
used_extensions_xml = ""
unused_extensions_xml = ""
def output_xml(self):
if extensions.__located_extensions__:
message_xml = "\t\t<message>" + _(EXTENSIONS_INFO_TEXT) + "</message>\n"
used_extensions_xml = ""
unused_extensions_xml = ""
for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i):
used_extensions_xml += "\t\t\t<extension>" + i + "</extension>\n"
else:
unused_extensions_xml += "\t\t\t<extension>" + i + "</extension>\n"
for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i):
used_extensions_xml += "\t\t\t<extension>" + i + "</extension>\n"
else:
unused_extensions_xml += "\t\t\t<extension>" + i + "</extension>\n"
print("\t<extensions>\n" + message_xml + "\t\t<used>\n" + used_extensions_xml + "\t\t</used>\n" +
"\t\t<unused>\n" + unused_extensions_xml + "\t\t</unused>\n" + "\t</extensions>")
print(
"\t<extensions>\n"
+ message_xml
+ "\t\t<used>\n"
+ used_extensions_xml
+ "\t\t</used>\n"
+ "\t\t<unused>\n"
+ unused_extensions_xml
+ "\t\t</unused>\n"
+ "\t</extensions>"
)

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import textwrap
from ..localization import N_
from ..filtering import __filters__, has_filtered
@ -26,96 +25,110 @@ from .. import terminal
from .outputable import Outputable
FILTERING_INFO_TEXT = N_("The following files were excluded from the statistics due to the specified exclusion patterns")
FILTERING_AUTHOR_INFO_TEXT = N_("The following authors were excluded from the statistics due to the specified exclusion patterns")
FILTERING_EMAIL_INFO_TEXT = N_("The authors with the following emails were excluded from the statistics due to the specified " \
"exclusion patterns")
FILTERING_COMMIT_INFO_TEXT = N_("The following commit revisions were excluded from the statistics due to the specified " \
"exclusion patterns")
FILTERING_AUTHOR_INFO_TEXT = N_(
"The following authors were excluded from the statistics due to the specified exclusion patterns"
)
FILTERING_EMAIL_INFO_TEXT = N_(
"The authors with the following emails were excluded from the statistics due to the specified " "exclusion patterns"
)
FILTERING_COMMIT_INFO_TEXT = N_(
"The following commit revisions were excluded from the statistics due to the specified " "exclusion patterns"
)
class FilteringOutput(Outputable):
@staticmethod
def __output_html_section__(info_string, filtered):
filtering_xml = ""
@staticmethod
def __output_html_section__(info_string, filtered):
filtering_xml = ""
if filtered:
filtering_xml += "<p>" + info_string + "."+ "</p>"
if filtered:
filtering_xml += "<p>" + info_string + "." + "</p>"
for i in filtered:
filtering_xml += "<p>" + i + "</p>"
for i in filtered:
filtering_xml += "<p>" + i + "</p>"
return filtering_xml
return filtering_xml
def output_html(self):
if has_filtered():
filtering_xml = "<div><div class=\"box\">"
FilteringOutput.__output_html_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
FilteringOutput.__output_html_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
FilteringOutput.__output_html_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
FilteringOutput.__output_html_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1])
filtering_xml += "</div></div>"
def output_html(self):
if has_filtered():
filtering_xml = '<div><div class="box">'
FilteringOutput.__output_html_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
FilteringOutput.__output_html_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
FilteringOutput.__output_html_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
FilteringOutput.__output_html_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1])
filtering_xml += "</div></div>"
print(filtering_xml)
print(filtering_xml)
@staticmethod
def __output_json_section__(info_string, filtered, container_tagname):
if filtered:
message_json = "\t\t\t\t\"message\": \"" + info_string + "\",\n"
filtering_json = ""
@staticmethod
def __output_json_section__(info_string, filtered, container_tagname):
if filtered:
message_json = '\t\t\t\t"message": "' + info_string + '",\n'
filtering_json = ""
for i in filtered:
filtering_json += "\t\t\t\t\t\"" + i + "\",\n"
else:
filtering_json = filtering_json[:-3]
for i in filtered:
filtering_json += '\t\t\t\t\t"' + i + '",\n'
else:
filtering_json = filtering_json[:-3]
return "\n\t\t\t\"{0}\": {{\n".format(container_tagname) + message_json + \
"\t\t\t\t\"entries\": [\n" + filtering_json + "\"\n\t\t\t\t]\n\t\t\t},"
return (
'\n\t\t\t"{0}": {{\n'.format(container_tagname)
+ message_json
+ '\t\t\t\t"entries": [\n'
+ filtering_json
+ '"\n\t\t\t\t]\n\t\t\t},'
)
return ""
return ""
def output_json(self):
if has_filtered():
output = ",\n\t\t\"filtering\": {"
output += FilteringOutput.__output_json_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
output += FilteringOutput.__output_json_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors")
output += FilteringOutput.__output_json_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
output += FilteringOutput.__output_json_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1], "revision")
output = output[:-1]
output += "\n\t\t}"
print(output, end="")
def output_json(self):
if has_filtered():
output = ',\n\t\t"filtering": {'
output += FilteringOutput.__output_json_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
output += FilteringOutput.__output_json_section__(
_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors"
)
output += FilteringOutput.__output_json_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
output += FilteringOutput.__output_json_section__(
_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1], "revision"
)
output = output[:-1]
output += "\n\t\t}"
print(output, end="")
@staticmethod
def __output_text_section__(info_string, filtered):
if filtered:
print("\n" + textwrap.fill(info_string + ":", width=terminal.get_size()[0]))
@staticmethod
def __output_text_section__(info_string, filtered):
if filtered:
print("\n" + textwrap.fill(info_string + ":", width=terminal.get_size()[0]))
for i in filtered:
(width, _unused) = terminal.get_size()
print("...%s" % i[-width+3:] if len(i) > width else i)
for i in filtered:
(width, _unused) = terminal.get_size()
print("...%s" % i[-width + 3:] if len(i) > width else i)
def output_text(self):
FilteringOutput.__output_text_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
FilteringOutput.__output_text_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
FilteringOutput.__output_text_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
FilteringOutput.__output_text_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1])
def output_text(self):
FilteringOutput.__output_text_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
FilteringOutput.__output_text_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
FilteringOutput.__output_text_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
FilteringOutput.__output_text_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1])
@staticmethod
def __output_xml_section__(info_string, filtered, container_tagname):
if filtered:
message_xml = "\t\t\t<message>" + info_string + "</message>\n"
filtering_xml = ""
@staticmethod
def __output_xml_section__(info_string, filtered, container_tagname):
if filtered:
message_xml = "\t\t\t<message>" + info_string + "</message>\n"
filtering_xml = ""
for i in filtered:
filtering_xml += "\t\t\t\t<entry>" + i + "</entry>\n"
for i in filtered:
filtering_xml += "\t\t\t\t<entry>" + i + "</entry>\n"
print("\t\t<{0}>".format(container_tagname))
print(message_xml + "\t\t\t<entries>\n" + filtering_xml + "\t\t\t</entries>\n")
print("\t\t</{0}>".format(container_tagname))
print("\t\t<{0}>".format(container_tagname))
print(message_xml + "\t\t\t<entries>\n" + filtering_xml + "\t\t\t</entries>\n")
print("\t\t</{0}>".format(container_tagname))
def output_xml(self):
if has_filtered():
print("\t<filtering>")
FilteringOutput.__output_xml_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
FilteringOutput.__output_xml_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors")
FilteringOutput.__output_xml_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
FilteringOutput.__output_xml_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1], "revision")
print("\t</filtering>")
def output_xml(self):
if has_filtered():
print("\t<filtering>")
FilteringOutput.__output_xml_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
FilteringOutput.__output_xml_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors")
FilteringOutput.__output_xml_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
FilteringOutput.__output_xml_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1], "revision")
print("\t</filtering>")

View File

@ -18,143 +18,168 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from ..changes import FileDiff
from ..localization import N_
from ..metrics import (__metric_eloc__, METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD)
from ..metrics import __metric_eloc__, METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD
from .outputable import Outputable
ELOC_INFO_TEXT = N_("The following files are suspiciously big (in order of severity)")
CYCLOMATIC_COMPLEXITY_TEXT = N_("The following files have an elevated cyclomatic complexity (in order of severity)")
CYCLOMATIC_COMPLEXITY_DENSITY_TEXT = N_("The following files have an elevated cyclomatic complexity density " \
"(in order of severity)")
CYCLOMATIC_COMPLEXITY_DENSITY_TEXT = N_(
"The following files have an elevated cyclomatic complexity density " "(in order of severity)"
)
METRICS_MISSING_INFO_TEXT = N_("No metrics violations were found in the repository")
METRICS_VIOLATION_SCORES = [[1.0, "minimal"], [1.25, "minor"], [1.5, "medium"], [2.0, "bad"], [3.0, "severe"]]
def __get_metrics_score__(ceiling, value):
for i in reversed(METRICS_VIOLATION_SCORES):
if value > ceiling * i[0]:
return i[1]
for i in reversed(METRICS_VIOLATION_SCORES):
if value > ceiling * i[0]:
return i[1]
class MetricsOutput(Outputable):
def __init__(self, metrics):
self.metrics = metrics
Outputable.__init__(self)
def __init__(self, metrics):
self.metrics = metrics
Outputable.__init__(self)
def output_text(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print("\n" + _(METRICS_MISSING_INFO_TEXT) + ".")
def output_text(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print("\n" + _(METRICS_MISSING_INFO_TEXT) + ".")
if self.metrics.eloc:
print("\n" + _(ELOC_INFO_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
print(_("{0} ({1} estimated lines of code)").format(i[1], str(i[0])))
if self.metrics.eloc:
print("\n" + _(ELOC_INFO_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
print(_("{0} ({1} estimated lines of code)").format(i[1], str(i[0])))
if self.metrics.cyclomatic_complexity:
print("\n" + _(CYCLOMATIC_COMPLEXITY_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
print(_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])))
if self.metrics.cyclomatic_complexity:
print("\n" + _(CYCLOMATIC_COMPLEXITY_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
print(_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])))
if self.metrics.cyclomatic_complexity_density:
print("\n" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True):
print(_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]))
if self.metrics.cyclomatic_complexity_density:
print("\n" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + ":")
for i in sorted(
set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True
):
print(_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]))
def output_html(self):
metrics_xml = "<div><div class=\"box\" id=\"metrics\">"
def output_html(self):
metrics_xml = '<div><div class="box" id="metrics">'
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
metrics_xml += "<p>" + _(METRICS_MISSING_INFO_TEXT) + ".</p>"
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
metrics_xml += "<p>" + _(METRICS_MISSING_INFO_TEXT) + ".</p>"
if self.metrics.eloc:
metrics_xml += "<div><h4>" + _(ELOC_INFO_TEXT) + ".</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True)):
metrics_xml += "<div class=\"" + __get_metrics_score__(__metric_eloc__[FileDiff.get_extension(i[1])], i[0]) + \
(" odd\">" if num % 2 == 1 else "\">") + \
_("{0} ({1} estimated lines of code)").format(i[1], str(i[0])) + "</div>"
metrics_xml += "</div>"
if self.metrics.eloc:
metrics_xml += "<div><h4>" + _(ELOC_INFO_TEXT) + ".</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True)):
metrics_xml += (
'<div class="'
+ __get_metrics_score__(__metric_eloc__[FileDiff.get_extension(i[1])], i[0])
+ (' odd">' if num % 2 == 1 else '">')
+ _("{0} ({1} estimated lines of code)").format(i[1], str(i[0]))
+ "</div>"
)
metrics_xml += "</div>"
if self.metrics.cyclomatic_complexity:
metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_TEXT) + "</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True)):
metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, i[0]) + \
(" odd\">" if num % 2 == 1 else "\">") + \
_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])) + "</div>"
metrics_xml += "</div>"
if self.metrics.cyclomatic_complexity:
metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_TEXT) + "</h4>"
for num, i in enumerate(
sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True)
):
metrics_xml += (
'<div class="'
+ __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, i[0])
+ (' odd">' if num % 2 == 1 else '">')
+ _("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0]))
+ "</div>"
)
metrics_xml += "</div>"
if self.metrics.cyclomatic_complexity_density:
metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + "</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True)):
metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD, i[0]) + \
(" odd\">" if num % 2 == 1 else "\">") + \
_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]) + "</div>"
metrics_xml += "</div>"
if self.metrics.cyclomatic_complexity_density:
metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + "</h4>"
for num, i in enumerate(
sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True)
):
metrics_xml += (
'<div class="'
+ __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD, i[0])
+ (' odd">' if num % 2 == 1 else '">')
+ _("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0])
+ "</div>"
)
metrics_xml += "</div>"
metrics_xml += "</div></div>"
print(metrics_xml)
metrics_xml += "</div></div>"
print(metrics_xml)
def output_json(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print(",\n\t\t\"metrics\": {\n\t\t\t\"message\": \"" + _(METRICS_MISSING_INFO_TEXT) + "\"\n\t\t}", end="")
else:
eloc_json = ""
def output_json(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print(',\n\t\t"metrics": {\n\t\t\t"message": "' + _(METRICS_MISSING_INFO_TEXT) + '"\n\t\t}', end="")
else:
eloc_json = ""
if self.metrics.eloc:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
eloc_json += "{\n\t\t\t\t\"type\": \"estimated-lines-of-code\",\n"
eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
eloc_json += "\t\t\t\t\"value\": " + str(i[0]) + "\n"
eloc_json += "\t\t\t},"
else:
if not self.metrics.cyclomatic_complexity:
eloc_json = eloc_json[:-1]
if self.metrics.eloc:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
eloc_json += '{\n\t\t\t\t"type": "estimated-lines-of-code",\n'
eloc_json += '\t\t\t\t"file_name": "' + i[1] + '",\n'
eloc_json += '\t\t\t\t"value": ' + str(i[0]) + "\n"
eloc_json += "\t\t\t},"
else:
if not self.metrics.cyclomatic_complexity:
eloc_json = eloc_json[:-1]
if self.metrics.cyclomatic_complexity:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
eloc_json += "{\n\t\t\t\t\"type\": \"cyclomatic-complexity\",\n"
eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
eloc_json += "\t\t\t\t\"value\": " + str(i[0]) + "\n"
eloc_json += "\t\t\t},"
else:
if not self.metrics.cyclomatic_complexity_density:
eloc_json = eloc_json[:-1]
if self.metrics.cyclomatic_complexity:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
eloc_json += '{\n\t\t\t\t"type": "cyclomatic-complexity",\n'
eloc_json += '\t\t\t\t"file_name": "' + i[1] + '",\n'
eloc_json += '\t\t\t\t"value": ' + str(i[0]) + "\n"
eloc_json += "\t\t\t},"
else:
if not self.metrics.cyclomatic_complexity_density:
eloc_json = eloc_json[:-1]
if self.metrics.cyclomatic_complexity_density:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True):
eloc_json += "{\n\t\t\t\t\"type\": \"cyclomatic-complexity-density\",\n"
eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n"
eloc_json += "\t\t\t\t\"value\": {0:.3f}\n".format(i[0])
eloc_json += "\t\t\t},"
else:
eloc_json = eloc_json[:-1]
if self.metrics.cyclomatic_complexity_density:
for i in sorted(
set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True
):
eloc_json += '{\n\t\t\t\t"type": "cyclomatic-complexity-density",\n'
eloc_json += '\t\t\t\t"file_name": "' + i[1] + '",\n'
eloc_json += '\t\t\t\t"value": {0:.3f}\n'.format(i[0])
eloc_json += "\t\t\t},"
else:
eloc_json = eloc_json[:-1]
print(",\n\t\t\"metrics\": {\n\t\t\t\"violations\": [\n\t\t\t" + eloc_json + "]\n\t\t}", end="")
def output_xml(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print("\t<metrics>\n\t\t<message>" + _(METRICS_MISSING_INFO_TEXT) + "</message>\n\t</metrics>")
else:
eloc_xml = ""
print(',\n\t\t"metrics": {\n\t\t\t"violations": [\n\t\t\t' + eloc_json + "]\n\t\t}", end="")
if self.metrics.eloc:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
eloc_xml += "\t\t\t<estimated-lines-of-code>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
eloc_xml += "\t\t\t</estimated-lines-of-code>\n"
def output_xml(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print("\t<metrics>\n\t\t<message>" + _(METRICS_MISSING_INFO_TEXT) + "</message>\n\t</metrics>")
else:
eloc_xml = ""
if self.metrics.cyclomatic_complexity:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
eloc_xml += "\t\t\t<cyclomatic-complexity>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
eloc_xml += "\t\t\t</cyclomatic-complexity>\n"
if self.metrics.eloc:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
eloc_xml += "\t\t\t<estimated-lines-of-code>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
eloc_xml += "\t\t\t</estimated-lines-of-code>\n"
if self.metrics.cyclomatic_complexity_density:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True):
eloc_xml += "\t\t\t<cyclomatic-complexity-density>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>{0:.3f}</value>\n".format(i[0])
eloc_xml += "\t\t\t</cyclomatic-complexity-density>\n"
if self.metrics.cyclomatic_complexity:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
eloc_xml += "\t\t\t<cyclomatic-complexity>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
eloc_xml += "\t\t\t</cyclomatic-complexity>\n"
print("\t<metrics>\n\t\t<violations>\n" + eloc_xml + "\t\t</violations>\n\t</metrics>")
if self.metrics.cyclomatic_complexity_density:
for i in sorted(
set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True
):
eloc_xml += "\t\t\t<cyclomatic-complexity-density>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>{0:.3f}</value>\n".format(i[0])
eloc_xml += "\t\t\t</cyclomatic-complexity-density>\n"
print("\t<metrics>\n\t\t<violations>\n" + eloc_xml + "\t\t</violations>\n\t</metrics>")

View File

@ -18,28 +18,29 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from .. import format
class Outputable(object):
def output_html(self):
raise NotImplementedError(_("HTML output not yet supported in") + " \"" + self.__class__.__name__ + "\".")
def output_html(self):
raise NotImplementedError(_("HTML output not yet supported in") + ' "' + self.__class__.__name__ + '".')
def output_json(self):
raise NotImplementedError(_("JSON output not yet supported in") + " \"" + self.__class__.__name__ + "\".")
def output_json(self):
raise NotImplementedError(_("JSON output not yet supported in") + ' "' + self.__class__.__name__ + '".')
def output_text(self):
raise NotImplementedError(_("Text output not yet supported in") + " \"" + self.__class__.__name__ + "\".")
def output_text(self):
raise NotImplementedError(_("Text output not yet supported in") + ' "' + self.__class__.__name__ + '".')
def output_xml(self):
raise NotImplementedError(_("XML output not yet supported in") + ' "' + self.__class__.__name__ + '".')
def output_xml(self):
raise NotImplementedError(_("XML output not yet supported in") + " \"" + self.__class__.__name__ + "\".")
def output(outputable):
if format.get_selected() == "html" or format.get_selected() == "htmlembedded":
outputable.output_html()
elif format.get_selected() == "json":
outputable.output_json()
elif format.get_selected() == "text":
outputable.output_text()
else:
outputable.output_xml()
if format.get_selected() == "html" or format.get_selected() == "htmlembedded":
outputable.output_html()
elif format.get_selected() == "json":
outputable.output_json()
elif format.get_selected() == "text":
outputable.output_text()
else:
outputable.output_xml()

View File

@ -18,126 +18,130 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import textwrap
from ..localization import N_
from .. import format, gravatar, terminal
from .. import responsibilities as resp
from .outputable import Outputable
RESPONSIBILITIES_INFO_TEXT = N_("The following responsibilities, by author, were found in the current "
"revision of the repository (comments are excluded from the line count, "
"if possible)")
RESPONSIBILITIES_INFO_TEXT = N_(
"The following responsibilities, by author, were found in the current "
"revision of the repository (comments are excluded from the line count, "
"if possible)"
)
MOSTLY_RESPONSIBLE_FOR_TEXT = N_("is mostly responsible for")
class ResponsibilitiesOutput(Outputable):
def __init__(self, changes, blame):
self.changes = changes
self.blame = blame
Outputable.__init__(self)
def __init__(self, changes, blame):
self.changes = changes
self.blame = blame
Outputable.__init__(self)
def output_text(self):
print("\n" + textwrap.fill(_(RESPONSIBILITIES_INFO_TEXT) + ":", width=terminal.get_size()[0]))
def output_text(self):
print("\n" + textwrap.fill(_(RESPONSIBILITIES_INFO_TEXT) + ":", width=terminal.get_size()[0]))
for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities:
print("\n" + i, _(MOSTLY_RESPONSIBLE_FOR_TEXT) + ":")
if responsibilities:
print("\n" + i, _(MOSTLY_RESPONSIBLE_FOR_TEXT) + ":")
for j, entry in enumerate(responsibilities):
(width, _unused) = terminal.get_size()
width -= 7
for j, entry in enumerate(responsibilities):
(width, _unused) = terminal.get_size()
width -= 7
print(str(entry[0]).rjust(6), end=" ")
print("...%s" % entry[1][-width+3:] if len(entry[1]) > width else entry[1])
print(str(entry[0]).rjust(6), end=" ")
print("...%s" % entry[1][-width + 3:] if len(entry[1]) > width else entry[1])
if j >= 9:
break
if j >= 9:
break
def output_html(self):
resp_xml = "<div><div class=\"box\" id=\"responsibilities\">"
resp_xml += "<p>" + _(RESPONSIBILITIES_INFO_TEXT) + ".</p>"
def output_html(self):
resp_xml = '<div><div class="box" id="responsibilities">'
resp_xml += "<p>" + _(RESPONSIBILITIES_INFO_TEXT) + ".</p>"
for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities:
resp_xml += "<div>"
if responsibilities:
resp_xml += "<div>"
if format.get_selected() == "html":
author_email = self.changes.get_latest_email_by_author(i)
resp_xml += "<h3><img src=\"{0}\"/>{1} {2}</h3>".format(gravatar.get_url(author_email, size=32),
i, _(MOSTLY_RESPONSIBLE_FOR_TEXT))
else:
resp_xml += "<h3>{0} {1}</h3>".format(i, _(MOSTLY_RESPONSIBLE_FOR_TEXT))
if format.get_selected() == "html":
author_email = self.changes.get_latest_email_by_author(i)
resp_xml += '<h3><img src="{0}"/>{1} {2}</h3>'.format(
gravatar.get_url(author_email, size=32), i, _(MOSTLY_RESPONSIBLE_FOR_TEXT)
)
else:
resp_xml += "<h3>{0} {1}</h3>".format(i, _(MOSTLY_RESPONSIBLE_FOR_TEXT))
for j, entry in enumerate(responsibilities):
resp_xml += "<div" + (" class=\"odd\">" if j % 2 == 1 else ">") + entry[1] + \
" (" + str(entry[0]) + " eloc)</div>"
if j >= 9:
break
for j, entry in enumerate(responsibilities):
resp_xml += (
"<div" + (' class="odd">' if j % 2 == 1 else ">") + entry[1] + " (" + str(entry[0]) + " eloc)</div>"
)
if j >= 9:
break
resp_xml += "</div>"
resp_xml += "</div></div>"
print(resp_xml)
resp_xml += "</div>"
resp_xml += "</div></div>"
print(resp_xml)
def output_json(self):
message_json = "\t\t\t\"message\": \"" + _(RESPONSIBILITIES_INFO_TEXT) + "\",\n"
resp_json = ""
def output_json(self):
message_json = '\t\t\t"message": "' + _(RESPONSIBILITIES_INFO_TEXT) + '",\n'
resp_json = ""
for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities:
author_email = self.changes.get_latest_email_by_author(i)
if responsibilities:
author_email = self.changes.get_latest_email_by_author(i)
resp_json += "{\n"
resp_json += "\t\t\t\t\"name\": \"" + i + "\",\n"
resp_json += "\t\t\t\t\"email\": \"" + author_email + "\",\n"
resp_json += "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
resp_json += "\t\t\t\t\"files\": [\n\t\t\t\t"
resp_json += "{\n"
resp_json += '\t\t\t\t"name": "' + i + '",\n'
resp_json += '\t\t\t\t"email": "' + author_email + '",\n'
resp_json += '\t\t\t\t"gravatar": "' + gravatar.get_url(author_email) + '",\n'
resp_json += '\t\t\t\t"files": [\n\t\t\t\t'
for j, entry in enumerate(responsibilities):
resp_json += "{\n"
resp_json += "\t\t\t\t\t\"name\": \"" + entry[1] + "\",\n"
resp_json += "\t\t\t\t\t\"rows\": " + str(entry[0]) + "\n"
resp_json += "\t\t\t\t},"
for j, entry in enumerate(responsibilities):
resp_json += "{\n"
resp_json += '\t\t\t\t\t"name": "' + entry[1] + '",\n'
resp_json += '\t\t\t\t\t"rows": ' + str(entry[0]) + "\n"
resp_json += "\t\t\t\t},"
if j >= 9:
break
if j >= 9:
break
resp_json = resp_json[:-1]
resp_json += "]\n\t\t\t},"
resp_json = resp_json[:-1]
resp_json += "]\n\t\t\t},"
resp_json = resp_json[:-1]
print(",\n\t\t\"responsibilities\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + resp_json + "]\n\t\t}", end="")
resp_json = resp_json[:-1]
print(',\n\t\t"responsibilities": {\n' + message_json + '\t\t\t"authors": [\n\t\t\t' + resp_json + "]\n\t\t}", end="")
def output_xml(self):
message_xml = "\t\t<message>" + _(RESPONSIBILITIES_INFO_TEXT) + "</message>\n"
resp_xml = ""
def output_xml(self):
message_xml = "\t\t<message>" + _(RESPONSIBILITIES_INFO_TEXT) + "</message>\n"
resp_xml = ""
for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities:
author_email = self.changes.get_latest_email_by_author(i)
for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities:
author_email = self.changes.get_latest_email_by_author(i)
resp_xml += "\t\t\t<author>\n"
resp_xml += "\t\t\t\t<name>" + i + "</name>\n"
resp_xml += "\t\t\t\t<email>" + author_email + "</email>\n"
resp_xml += "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
resp_xml += "\t\t\t\t<files>\n"
resp_xml += "\t\t\t<author>\n"
resp_xml += "\t\t\t\t<name>" + i + "</name>\n"
resp_xml += "\t\t\t\t<email>" + author_email + "</email>\n"
resp_xml += "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
resp_xml += "\t\t\t\t<files>\n"
for j, entry in enumerate(responsibilities):
resp_xml += "\t\t\t\t\t<file>\n"
resp_xml += "\t\t\t\t\t\t<name>" + entry[1] + "</name>\n"
resp_xml += "\t\t\t\t\t\t<rows>" + str(entry[0]) + "</rows>\n"
resp_xml += "\t\t\t\t\t</file>\n"
for j, entry in enumerate(responsibilities):
resp_xml += "\t\t\t\t\t<file>\n"
resp_xml += "\t\t\t\t\t\t<name>" + entry[1] + "</name>\n"
resp_xml += "\t\t\t\t\t\t<rows>" + str(entry[0]) + "</rows>\n"
resp_xml += "\t\t\t\t\t</file>\n"
if j >= 9:
break
if j >= 9:
break
resp_xml += "\t\t\t\t</files>\n"
resp_xml += "\t\t\t</author>\n"
resp_xml += "\t\t\t\t</files>\n"
resp_xml += "\t\t\t</author>\n"
print("\t<responsibilities>\n" + message_xml + "\t\t<authors>\n" + resp_xml + "\t\t</authors>\n\t</responsibilities>")
print("\t<responsibilities>\n" + message_xml + "\t\t<authors>\n" + resp_xml + "\t\t</authors>\n\t</responsibilities>")

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import textwrap
from ..localization import N_
from .. import format, gravatar, terminal, timeline
@ -27,182 +26,195 @@ from .outputable import Outputable
TIMELINE_INFO_TEXT = N_("The following history timeline has been gathered from the repository")
MODIFIED_ROWS_TEXT = N_("Modified Rows:")
def __output_row__text__(timeline_data, periods, names):
print("\n" + terminal.__bold__ + terminal.ljust(_("Author"), 20), end=" ")
print("\n" + terminal.__bold__ + terminal.ljust(_("Author"), 20), end=" ")
for period in periods:
print(terminal.rjust(period, 10), end=" ")
for period in periods:
print(terminal.rjust(period, 10), end=" ")
print(terminal.__normal__)
print(terminal.__normal__)
for name in names:
if timeline_data.is_author_in_periods(periods, name[0]):
print(terminal.ljust(name[0], 20)[0:20 - terminal.get_excess_column_count(name[0])], end=" ")
for name in names:
if timeline_data.is_author_in_periods(periods, name[0]):
print(terminal.ljust(name[0], 20)[0:20 - terminal.get_excess_column_count(name[0])], end=" ")
for period in periods:
multiplier = timeline_data.get_multiplier(period, 9)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "-" + signs[0] * "+")
print (("." if timeline_data.is_author_in_period(period, name[0]) and
len(signs_str) == 0 else signs_str).rjust(10), end=" ")
print("")
for period in periods:
multiplier = timeline_data.get_multiplier(period, 9)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = signs[1] * "-" + signs[0] * "+"
print(
("." if timeline_data.is_author_in_period(period, name[0]) and len(signs_str) == 0 else signs_str).rjust(
10
),
end=" ",
)
print("")
print(terminal.__bold__ + terminal.ljust(_(MODIFIED_ROWS_TEXT), 20) + terminal.__normal__, end=" ")
print(terminal.__bold__ + terminal.ljust(_(MODIFIED_ROWS_TEXT), 20) + terminal.__normal__, end=" ")
for period in periods:
total_changes = str(timeline_data.get_total_changes_in_period(period)[2])
for period in periods:
total_changes = str(timeline_data.get_total_changes_in_period(period)[2])
if hasattr(total_changes, 'decode'):
total_changes = total_changes.decode("utf-8", "replace")
if hasattr(total_changes, "decode"):
total_changes = total_changes.decode("utf-8", "replace")
print(terminal.rjust(total_changes, 10), end=" ")
print(terminal.rjust(total_changes, 10), end=" ")
print("")
print("")
def __output_row__html__(timeline_data, periods, names):
timeline_xml = "<table class=\"git full\"><thead><tr><th>" + _("Author") + "</th>"
timeline_xml = '<table class="git full"><thead><tr><th>' + _("Author") + "</th>"
for period in periods:
timeline_xml += "<th>" + str(period) + "</th>"
for period in periods:
timeline_xml += "<th>" + str(period) + "</th>"
timeline_xml += "</tr></thead><tbody>"
i = 0
timeline_xml += "</tr></thead><tbody>"
i = 0
for name in names:
if timeline_data.is_author_in_periods(periods, name[0]):
timeline_xml += "<tr" + (" class=\"odd\">" if i % 2 == 1 else ">")
for name in names:
if timeline_data.is_author_in_periods(periods, name[0]):
timeline_xml += "<tr" + (' class="odd">' if i % 2 == 1 else ">")
if format.get_selected() == "html":
timeline_xml += "<td><img src=\"{0}\"/>{1}</td>".format(gravatar.get_url(name[1]), name[0])
else:
timeline_xml += "<td>" + name[0] + "</td>"
if format.get_selected() == "html":
timeline_xml += '<td><img src="{0}"/>{1}</td>'.format(gravatar.get_url(name[1]), name[0])
else:
timeline_xml += "<td>" + name[0] + "</td>"
for period in periods:
multiplier = timeline_data.get_multiplier(period, 18)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "<div class=\"remove\">&nbsp;</div>" + signs[0] * "<div class=\"insert\">&nbsp;</div>")
for period in periods:
multiplier = timeline_data.get_multiplier(period, 18)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = signs[1] * '<div class="remove">&nbsp;</div>' + signs[0] * '<div class="insert">&nbsp;</div>'
timeline_xml += "<td>" + ("." if timeline_data.is_author_in_period(period, name[0]) and len(signs_str) == 0 else signs_str)
timeline_xml += "</td>"
timeline_xml += "</tr>"
i = i + 1
timeline_xml += "<td>" + (
"." if timeline_data.is_author_in_period(period, name[0]) and len(signs_str) == 0 else signs_str
)
timeline_xml += "</td>"
timeline_xml += "</tr>"
i = i + 1
timeline_xml += "<tfoot><tr><td><strong>" + _(MODIFIED_ROWS_TEXT) + "</strong></td>"
timeline_xml += "<tfoot><tr><td><strong>" + _(MODIFIED_ROWS_TEXT) + "</strong></td>"
for period in periods:
total_changes = timeline_data.get_total_changes_in_period(period)
timeline_xml += "<td>" + str(total_changes[2]) + "</td>"
for period in periods:
total_changes = timeline_data.get_total_changes_in_period(period)
timeline_xml += "<td>" + str(total_changes[2]) + "</td>"
timeline_xml += "</tr></tfoot></tbody></table>"
print(timeline_xml)
timeline_xml += "</tr></tfoot></tbody></table>"
print(timeline_xml)
class TimelineOutput(Outputable):
def __init__(self, changes, useweeks):
self.changes = changes
self.useweeks = useweeks
Outputable.__init__(self)
def __init__(self, changes, useweeks):
self.changes = changes
self.useweeks = useweeks
Outputable.__init__(self)
def output_text(self):
if self.changes.get_commits():
print("\n" + textwrap.fill(_(TIMELINE_INFO_TEXT) + ":", width=terminal.get_size()[0]))
def output_text(self):
if self.changes.get_commits():
print("\n" + textwrap.fill(_(TIMELINE_INFO_TEXT) + ":", width=terminal.get_size()[0]))
timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
(width, _unused) = terminal.get_size()
max_periods_per_row = int((width - 21) / 11)
timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
(width, _unused) = terminal.get_size()
max_periods_per_row = int((width - 21) / 11)
for i in range(0, len(periods), max_periods_per_row):
__output_row__text__(timeline_data, periods[i:i+max_periods_per_row], names)
for i in range(0, len(periods), max_periods_per_row):
__output_row__text__(timeline_data, periods[i:i + max_periods_per_row], names)
def output_html(self):
if self.changes.get_commits():
timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
max_periods_per_row = 8
def output_html(self):
if self.changes.get_commits():
timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
max_periods_per_row = 8
timeline_xml = "<div><div id=\"timeline\" class=\"box\">"
timeline_xml += "<p>" + _(TIMELINE_INFO_TEXT) + ".</p>"
print(timeline_xml)
timeline_xml = '<div><div id="timeline" class="box">'
timeline_xml += "<p>" + _(TIMELINE_INFO_TEXT) + ".</p>"
print(timeline_xml)
for i in range(0, len(periods), max_periods_per_row):
__output_row__html__(timeline_data, periods[i:i+max_periods_per_row], names)
for i in range(0, len(periods), max_periods_per_row):
__output_row__html__(timeline_data, periods[i:i + max_periods_per_row], names)
timeline_xml = "</div></div>"
print(timeline_xml)
timeline_xml = "</div></div>"
print(timeline_xml)
def output_json(self):
if self.changes.get_commits():
message_json = "\t\t\t\"message\": \"" + _(TIMELINE_INFO_TEXT) + "\",\n"
timeline_json = ""
periods_json = "\t\t\t\"period_length\": \"{0}\",\n".format("week" if self.useweeks else "month")
periods_json += "\t\t\t\"periods\": [\n\t\t\t"
def output_json(self):
if self.changes.get_commits():
message_json = '\t\t\t"message": "' + _(TIMELINE_INFO_TEXT) + '",\n'
timeline_json = ""
periods_json = '\t\t\t"period_length": "{0}",\n'.format("week" if self.useweeks else "month")
periods_json += '\t\t\t"periods": [\n\t\t\t'
timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
for period in periods:
name_json = "\t\t\t\t\"name\": \"" + str(period) + "\",\n"
authors_json = "\t\t\t\t\"authors\": [\n\t\t\t\t"
for period in periods:
name_json = '\t\t\t\t"name": "' + str(period) + '",\n'
authors_json = '\t\t\t\t"authors": [\n\t\t\t\t'
for name in names:
if timeline_data.is_author_in_period(period, name[0]):
multiplier = timeline_data.get_multiplier(period, 24)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "-" + signs[0] * "+")
for name in names:
if timeline_data.is_author_in_period(period, name[0]):
multiplier = timeline_data.get_multiplier(period, 24)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = signs[1] * "-" + signs[0] * "+"
if len(signs_str) == 0:
signs_str = "."
if len(signs_str) == 0:
signs_str = "."
authors_json += "{\n\t\t\t\t\t\"name\": \"" + name[0] + "\",\n"
authors_json += "\t\t\t\t\t\"email\": \"" + name[1] + "\",\n"
authors_json += "\t\t\t\t\t\"gravatar\": \"" + gravatar.get_url(name[1]) + "\",\n"
authors_json += "\t\t\t\t\t\"work\": \"" + signs_str + "\"\n\t\t\t\t},"
else:
authors_json = authors_json[:-1]
authors_json += '{\n\t\t\t\t\t"name": "' + name[0] + '",\n'
authors_json += '\t\t\t\t\t"email": "' + name[1] + '",\n'
authors_json += '\t\t\t\t\t"gravatar": "' + gravatar.get_url(name[1]) + '",\n'
authors_json += '\t\t\t\t\t"work": "' + signs_str + '"\n\t\t\t\t},'
else:
authors_json = authors_json[:-1]
authors_json += "],\n"
modified_rows_json = "\t\t\t\t\"modified_rows\": " + \
str(timeline_data.get_total_changes_in_period(period)[2]) + "\n"
timeline_json += "{\n" + name_json + authors_json + modified_rows_json + "\t\t\t},"
else:
timeline_json = timeline_json[:-1]
authors_json += "],\n"
modified_rows_json = (
'\t\t\t\t"modified_rows": ' + str(timeline_data.get_total_changes_in_period(period)[2]) + "\n"
)
timeline_json += "{\n" + name_json + authors_json + modified_rows_json + "\t\t\t},"
else:
timeline_json = timeline_json[:-1]
print(",\n\t\t\"timeline\": {\n" + message_json + periods_json + timeline_json + "]\n\t\t}", end="")
print(',\n\t\t"timeline": {\n' + message_json + periods_json + timeline_json + "]\n\t\t}", end="")
def output_xml(self):
if self.changes.get_commits():
message_xml = "\t\t<message>" + _(TIMELINE_INFO_TEXT) + "</message>\n"
timeline_xml = ""
periods_xml = "\t\t<periods length=\"{0}\">\n".format("week" if self.useweeks else "month")
def output_xml(self):
if self.changes.get_commits():
message_xml = "\t\t<message>" + _(TIMELINE_INFO_TEXT) + "</message>\n"
timeline_xml = ""
periods_xml = '\t\t<periods length="{0}">\n'.format("week" if self.useweeks else "month")
timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
for period in periods:
name_xml = "\t\t\t\t<name>" + str(period) + "</name>\n"
authors_xml = "\t\t\t\t<authors>\n"
for period in periods:
name_xml = "\t\t\t\t<name>" + str(period) + "</name>\n"
authors_xml = "\t\t\t\t<authors>\n"
for name in names:
if timeline_data.is_author_in_period(period, name[0]):
multiplier = timeline_data.get_multiplier(period, 24)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "-" + signs[0] * "+")
for name in names:
if timeline_data.is_author_in_period(period, name[0]):
multiplier = timeline_data.get_multiplier(period, 24)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = signs[1] * "-" + signs[0] * "+"
if len(signs_str) == 0:
signs_str = "."
if len(signs_str) == 0:
signs_str = "."
authors_xml += "\t\t\t\t\t<author>\n\t\t\t\t\t\t<name>" + name[0] + "</name>\n"
authors_xml += "\t\t\t\t\t\t<email>" + name[1] + "</email>\n"
authors_xml += "\t\t\t\t\t\t<gravatar>" + gravatar.get_url(name[1]) + "</gravatar>\n"
authors_xml += "\t\t\t\t\t\t<work>" + signs_str + "</work>\n\t\t\t\t\t</author>\n"
authors_xml += "\t\t\t\t\t<author>\n\t\t\t\t\t\t<name>" + name[0] + "</name>\n"
authors_xml += "\t\t\t\t\t\t<email>" + name[1] + "</email>\n"
authors_xml += "\t\t\t\t\t\t<gravatar>" + gravatar.get_url(name[1]) + "</gravatar>\n"
authors_xml += "\t\t\t\t\t\t<work>" + signs_str + "</work>\n\t\t\t\t\t</author>\n"
authors_xml += "\t\t\t\t</authors>\n"
modified_rows_xml = "\t\t\t\t<modified_rows>" + \
str(timeline_data.get_total_changes_in_period(period)[2]) + "</modified_rows>\n"
timeline_xml += "\t\t\t<period>\n" + name_xml + authors_xml + modified_rows_xml + "\t\t\t</period>\n"
authors_xml += "\t\t\t\t</authors>\n"
modified_rows_xml = (
"\t\t\t\t<modified_rows>"
+ str(timeline_data.get_total_changes_in_period(period)[2])
+ "</modified_rows>\n"
)
timeline_xml += "\t\t\t<period>\n" + name_xml + authors_xml + modified_rows_xml + "\t\t\t</period>\n"
print("\t<timeline>\n" + message_xml + periods_xml + timeline_xml + "\t\t</periods>\n\t</timeline>")
print("\t<timeline>\n" + message_xml + periods_xml + timeline_xml + "\t\t</periods>\n\t</timeline>")

View File

@ -18,20 +18,19 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
class ResponsibiltyEntry(object):
blames = {}
blames = {}
class Responsibilities(object):
@staticmethod
def get(blame, author_name):
author_blames = {}
@staticmethod
def get(blame, author_name):
author_blames = {}
for i in list(blame.blames.items()):
if author_name == i[0][0]:
total_rows = i[1].rows - i[1].comments
if total_rows > 0:
author_blames[i[0][1]] = total_rows
for i in list(blame.blames.items()):
if author_name == i[0][0]:
total_rows = i[1].rows - i[1].comments
if total_rows > 0:
author_blames[i[0][1]] = total_rows
return sorted(author_blames.items())
return sorted(author_blames.items())

View File

@ -29,130 +29,151 @@ __normal__ = "\033[0;0m"
DEFAULT_TERMINAL_SIZE = (80, 25)
def __get_size_windows__():
res = None
try:
from ctypes import windll, create_string_buffer
res = None
try:
from ctypes import windll, create_string_buffer
handler = windll.kernel32.GetStdHandle(-12) # stderr
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handler, csbi)
except:
return DEFAULT_TERMINAL_SIZE
handler = windll.kernel32.GetStdHandle(-12) # stderr
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handler, csbi)
except:
return DEFAULT_TERMINAL_SIZE
if res:
import struct
(_, _, _, _, _, left, top, right, bottom, _, _) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
else:
return DEFAULT_TERMINAL_SIZE
if res:
import struct
(_, _, _, _, _, left, top, right, bottom, _, _) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
else:
return DEFAULT_TERMINAL_SIZE
def __get_size_linux__():
def ioctl_get_window_size(file_descriptor):
try:
import fcntl, termios, struct
size = struct.unpack('hh', fcntl.ioctl(file_descriptor, termios.TIOCGWINSZ, "1234"))
except:
return DEFAULT_TERMINAL_SIZE
def ioctl_get_window_size(file_descriptor):
try:
import fcntl, termios, struct
return size
size = struct.unpack("hh", fcntl.ioctl(file_descriptor, termios.TIOCGWINSZ, "1234"))
except:
return DEFAULT_TERMINAL_SIZE
size = ioctl_get_window_size(0) or ioctl_get_window_size(1) or ioctl_get_window_size(2)
return size
if not size:
try:
file_descriptor = os.open(os.ctermid(), os.O_RDONLY)
size = ioctl_get_window_size(file_descriptor)
os.close(file_descriptor)
except:
pass
if not size:
try:
size = (os.environ["LINES"], os.environ["COLUMNS"])
except:
return DEFAULT_TERMINAL_SIZE
size = ioctl_get_window_size(0) or ioctl_get_window_size(1) or ioctl_get_window_size(2)
if not size:
try:
file_descriptor = os.open(os.ctermid(), os.O_RDONLY)
size = ioctl_get_window_size(file_descriptor)
os.close(file_descriptor)
except:
pass
if not size:
try:
size = (os.environ["LINES"], os.environ["COLUMNS"])
except:
return DEFAULT_TERMINAL_SIZE
return int(size[1]), int(size[0])
return int(size[1]), int(size[0])
def clear_row():
print("\r", end="")
print("\r", end="")
def skip_escapes(skip):
if skip:
global __bold__
global __normal__
__bold__ = ""
__normal__ = ""
if skip:
global __bold__
global __normal__
__bold__ = ""
__normal__ = ""
def printb(string):
print(__bold__ + string + __normal__)
print(__bold__ + string + __normal__)
def get_size():
width = 0
height = 0
width = 0
height = 0
if sys.stdout.isatty():
current_os = platform.system()
if sys.stdout.isatty():
current_os = platform.system()
if current_os == "Windows":
(width, height) = __get_size_windows__()
elif current_os == "Linux" or current_os == "Darwin" or current_os.startswith("CYGWIN"):
(width, height) = __get_size_linux__()
if current_os == "Windows":
(width, height) = __get_size_windows__()
elif current_os == "Linux" or current_os == "Darwin" or current_os.startswith("CYGWIN"):
(width, height) = __get_size_linux__()
if width > 0:
return (width, height)
if width > 0:
return (width, height)
return DEFAULT_TERMINAL_SIZE
return DEFAULT_TERMINAL_SIZE
def set_stdout_encoding():
if not sys.stdout.isatty() and sys.version_info < (3,):
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
if not sys.stdout.isatty() and sys.version_info < (3,):
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
def set_stdin_encoding():
if not sys.stdin.isatty() and sys.version_info < (3,):
sys.stdin = codecs.getreader("utf-8")(sys.stdin)
if not sys.stdin.isatty() and sys.version_info < (3,):
sys.stdin = codecs.getreader("utf-8")(sys.stdin)
def convert_command_line_to_utf8():
try:
argv = []
try:
argv = []
for arg in sys.argv:
argv.append(arg.decode(sys.stdin.encoding, "replace"))
for arg in sys.argv:
argv.append(arg.decode(sys.stdin.encoding, "replace"))
return argv
except AttributeError:
return sys.argv
return argv
except AttributeError:
return sys.argv
def check_terminal_encoding():
if sys.stdout.isatty() and (sys.stdout.encoding == None or sys.stdin.encoding == None):
print(_("WARNING: The terminal encoding is not correctly configured. gitinspector might malfunction. "
"The encoding can be configured with the environment variable 'PYTHONIOENCODING'."), file=sys.stderr)
if sys.stdout.isatty() and (sys.stdout.encoding is None or sys.stdin.encoding is None):
print(
_(
"WARNING: The terminal encoding is not correctly configured. gitinspector might malfunction. "
"The encoding can be configured with the environment variable 'PYTHONIOENCODING'."
),
file=sys.stderr,
)
def get_excess_column_count(string):
width_mapping = {'F': 2, 'H': 1, 'W': 2, 'Na': 1, 'N': 1, 'A': 1}
result = 0
width_mapping = {"F": 2, "H": 1, "W": 2, "Na": 1, "N": 1, "A": 1}
result = 0
for i in string:
width = unicodedata.east_asian_width(i)
result += width_mapping[width]
for i in string:
width = unicodedata.east_asian_width(i)
result += width_mapping[width]
return result - len(string)
return result - len(string)
def ljust(string, pad):
return string.ljust(pad - get_excess_column_count(string))
return string.ljust(pad - get_excess_column_count(string))
def rjust(string, pad):
return string.rjust(pad - get_excess_column_count(string))
return string.rjust(pad - get_excess_column_count(string))
def output_progress(text, pos, length):
if sys.stdout.isatty():
(width, _unused) = get_size()
progress_text = text.format(100 * pos / length)
if sys.stdout.isatty():
(width, _unused) = get_size()
progress_text = text.format(100 * pos / length)
if len(progress_text) > width:
progress_text = "...%s" % progress_text[-width+3:]
if len(progress_text) > width:
progress_text = "...%s" % progress_text[-width + 3:]
print("\r{0}\r{1}".format(" " * width, progress_text), end="")
sys.stdout.flush()
print("\r{0}\r{1}".format(" " * width, progress_text), end="")
sys.stdout.flush()

View File

@ -20,81 +20,81 @@
import datetime
class TimelineData(object):
def __init__(self, changes, useweeks):
authordateinfo_list = sorted(changes.get_authordateinfo_list().items())
self.changes = changes
self.entries = {}
self.total_changes_by_period = {}
self.useweeks = useweeks
def __init__(self, changes, useweeks):
authordateinfo_list = sorted(changes.get_authordateinfo_list().items())
self.changes = changes
self.entries = {}
self.total_changes_by_period = {}
self.useweeks = useweeks
for i in authordateinfo_list:
key = None
for i in authordateinfo_list:
key = None
if useweeks:
yearweek = datetime.date(int(i[0][0][0:4]), int(i[0][0][5:7]), int(i[0][0][8:10])).isocalendar()
key = (i[0][1], str(yearweek[0]) + "W" + "{0:02d}".format(yearweek[1]))
else:
key = (i[0][1], i[0][0][0:7])
if useweeks:
yearweek = datetime.date(int(i[0][0][0:4]), int(i[0][0][5:7]), int(i[0][0][8:10])).isocalendar()
key = (i[0][1], str(yearweek[0]) + "W" + "{0:02d}".format(yearweek[1]))
else:
key = (i[0][1], i[0][0][0:7])
if self.entries.get(key, None) == None:
self.entries[key] = i[1]
else:
self.entries[key].insertions += i[1].insertions
self.entries[key].deletions += i[1].deletions
if self.entries.get(key, None) is None:
self.entries[key] = i[1]
else:
self.entries[key].insertions += i[1].insertions
self.entries[key].deletions += i[1].deletions
for period in self.get_periods():
total_insertions = 0
total_deletions = 0
for period in self.get_periods():
total_insertions = 0
total_deletions = 0
for author in self.get_authors():
entry = self.entries.get((author[0], period), None)
if entry != None:
total_insertions += entry.insertions
total_deletions += entry.deletions
for author in self.get_authors():
entry = self.entries.get((author[0], period), None)
if entry is not None:
total_insertions += entry.insertions
total_deletions += entry.deletions
self.total_changes_by_period[period] = (total_insertions, total_deletions,
total_insertions + total_deletions)
self.total_changes_by_period[period] = (total_insertions, total_deletions, total_insertions + total_deletions)
def get_periods(self):
return sorted(set([i[1] for i in self.entries]))
def get_periods(self):
return sorted(set([i[1] for i in self.entries]))
def get_total_changes_in_period(self, period):
return self.total_changes_by_period[period]
def get_total_changes_in_period(self, period):
return self.total_changes_by_period[period]
def get_authors(self):
return sorted(set([(i[0][0], self.changes.get_latest_email_by_author(i[0][0])) for i in list(self.entries.items())]))
def get_authors(self):
return sorted(set([(i[0][0], self.changes.get_latest_email_by_author(i[0][0])) for i in list(self.entries.items())]))
def get_author_signs_in_period(self, author, period, multiplier):
    """Return (insertion_signs, deletion_signs) for an author in a period.

    Each count is the author's share of all changes in the period,
    scaled by *multiplier* and truncated to an int — used to size the
    timeline bar segments. Returns (0, 0) when the author has no entry
    for the period.
    """
    authorinfo = self.entries.get((author, period), None)
    # Early return avoids touching total_changes_by_period at all for an
    # unknown author/period combination.
    if not authorinfo:
        return (0, 0)
    total = float(self.total_changes_by_period[period][2])
    insertion_signs = multiplier * (authorinfo.insertions / total)
    deletion_signs = multiplier * (authorinfo.deletions / total)
    return (int(insertion_signs), int(deletion_signs))
def get_multiplier(self, period, max_width):
    """Find the smallest multiplier (stepping by 0.25) at which some
    author's bar in *period* exceeds max_width.

    NOTE(review): if the period has no entries this loops forever —
    callers are presumed to pass only periods returned by get_periods().
    """
    multiplier = 0
    while True:
        for key in self.entries:
            if period != key[1]:
                continue
            entry = self.entries[key]
            changes_in_period = float(self.total_changes_by_period[key[1]][2])
            # Same arithmetic order as the rest of the class to keep
            # float behaviour identical.
            if multiplier * (entry.insertions + entry.deletions) / changes_in_period > max_width:
                return multiplier
        multiplier += 0.25
def is_author_in_period(self, period, author):
    """Return True if the author has a recorded entry for the period."""
    # Membership test is equivalent to `.get(...) is not None` here:
    # stored entries are AuthorInfo objects, never None.
    return (author, period) in self.entries
def is_author_in_periods(self, periods, author):
    """Return True if the author is active in at least one of *periods*."""
    return any(self.is_author_in_period(period, author) for period in periods)

View File

@ -18,17 +18,21 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from . import localization
localization.init()
# Package version plus the localized license banner printed by output().
__version__ = "0.5.0dev"

# _() is gettext installed by localization.init(); the banner text itself
# must stay byte-identical (it is user-facing output).
__doc__ = _(
    """Copyright © 2012-2015 Ejwa Software. All rights reserved.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Written by Adam Waldenberg."""
)
def output():
    """Print the gitinspector version banner followed by the license text."""
    print("gitinspector {0}\n".format(__version__) + __doc__)

View File

@ -19,29 +19,31 @@
from __future__ import unicode_literals
import os
import sys
import unittest
import gitinspector.comment
def __test_extension__(commented_file, extension):
    """Count the commented lines in a resource file.

    commented_file is a path relative to this test module's directory;
    extension selects which comment syntax gitinspector.comment applies.
    Returns the number of lines that are comments or lie inside a
    comment block.
    """
    base = os.path.dirname(os.path.realpath(__file__))
    # `with` guarantees the handle is closed even if readlines() raises.
    with open(base + commented_file, "r") as source_file:
        lines = source_file.readlines()

    is_inside_comment = False
    comment_counter = 0
    for line in lines:
        (_, is_inside_comment) = gitinspector.comment.handle_comment_block(is_inside_comment, extension, line)
        if is_inside_comment or gitinspector.comment.is_comment(extension, line):
            comment_counter += 1
    return comment_counter
class TexFileTest(unittest.TestCase):
    """Verify comment counting for the LaTeX comment syntax."""

    def test(self):
        # 30 commented lines are expected in the .tex fixture.
        comment_counter = __test_extension__("/resources/commented_file.tex", "tex")
        self.assertEqual(comment_counter, 30)
class CppFileTest(unittest.TestCase):
def test(self):