Massive Linting and clean up

JP White 2021-02-27 22:39:08 -05:00
parent 2dd72c1d96
commit c17c5795ad
31 changed files with 2233 additions and 1822 deletions

View File

@@ -26,14 +26,11 @@ jobs:
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
+        make requirements
         if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+        make test-coverage
     - name: Lint with flake8
       run: |
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+        make lint
     - name: Test with pytest
       run: |
-        pytest
+        make test-coverage

View File

@@ -35,9 +35,13 @@ clean-test: ## remove test and coverage artifacts
 	rm -fr .pytest_cache

 lint: ## check style with flake8
-	black gitinspector --line-length 120
-	find . -name '*.py' -exec autopep8 -i {} --max-line-length=120 \;
-	flake8 gitinspector tests --max-line-length=120
+	# stop the build if there are Python syntax errors or undefined names
+	flake8 gitinspector tests --count --select=E9,F63,F7,F82 --show-source --statistics --builtins="_"
+	# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+	flake8 gitinspector tests --count --ignore=E722,W503,E401,C901 --exit-zero --max-complexity=10 --max-line-length=127 --statistics --builtins="_"
+
+format: ## auto format all the code with black
+	black gitinspector --line-length 127

 test: ## run tests quickly with the default Python
 	pytest

View File

@@ -21,43 +21,45 @@ import os
 import subprocess
 import sys

 def get_basedir():
     if hasattr(sys, "frozen"):  # exists when running via py2exe
         return sys.prefix
     else:
         return os.path.dirname(os.path.realpath(__file__))

 def get_basedir_git(path=None):
     previous_directory = None

-    if path != None:
+    if path is not None:
         previous_directory = os.getcwd()
         os.chdir(path)

-    bare_command = subprocess.Popen(["git", "rev-parse", "--is-bare-repository"],
-                                    stdout=subprocess.PIPE, stderr=open(os.devnull, "w"))
+    bare_command = subprocess.Popen(
+        ["git", "rev-parse", "--is-bare-repository"], stdout=subprocess.PIPE, stderr=open(os.devnull, "w")
+    )

     isbare = bare_command.stdout.readlines()
     bare_command.wait()

     if bare_command.returncode != 0:
-        sys.exit(_("Error processing git repository at \"%s\"." % os.getcwd()))
+        sys.exit(_('Error processing git repository at "%s".' % os.getcwd()))

-    isbare = (isbare[0].decode("utf-8", "replace").strip() == "true")
+    isbare = isbare[0].decode("utf-8", "replace").strip() == "true"
     absolute_path = None

     if isbare:
         absolute_path = subprocess.Popen(["git", "rev-parse", "--git-dir"], stdout=subprocess.PIPE).stdout
     else:
-        absolute_path = subprocess.Popen(["git", "rev-parse", "--show-toplevel"],
-                                         stdout=subprocess.PIPE).stdout
+        absolute_path = subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE).stdout

     absolute_path = absolute_path.readlines()

     if len(absolute_path) == 0:
         sys.exit(_("Unable to determine absolute path of git repository."))

-    if path != None:
+    if path is not None:
         os.chdir(previous_directory)

     return absolute_path[0].decode("utf-8", "replace").strip()
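Reviewer note: a minimal usage sketch of the two helpers above, assuming it is run from inside a git checkout; the "." argument is only an example, not part of this change.

# Hypothetical usage of basedir.get_basedir() / get_basedir_git().
from gitinspector import basedir

print(basedir.get_basedir())         # directory that contains the gitinspector package itself
print(basedir.get_basedir_git("."))  # repository top level (or --git-dir for a bare repo), via git rev-parse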

View File

@@ -18,7 +18,6 @@
 # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.

 import datetime
 import multiprocessing
 import re
@@ -30,172 +29,189 @@ from . import comment, extensions, filtering, format, interval, terminal
 NUM_THREADS = multiprocessing.cpu_count()

 class BlameEntry(object):
     rows = 0
     skew = 0  # Used when calculating average code age.
     comments = 0

 __thread_lock__ = threading.BoundedSemaphore(NUM_THREADS)
 __blame_lock__ = threading.Lock()
 AVG_DAYS_PER_MONTH = 30.4167

 class BlameThread(threading.Thread):
     def __init__(self, useweeks, changes, blame_command, extension, blames, filename):
         __thread_lock__.acquire()  # Lock controlling the number of threads running
         threading.Thread.__init__(self)

         self.useweeks = useweeks
         self.changes = changes
         self.blame_command = blame_command
         self.extension = extension
         self.blames = blames
         self.filename = filename

         self.is_inside_comment = False

     def __clear_blamechunk_info__(self):
         self.blamechunk_email = None
         self.blamechunk_is_last = False
         self.blamechunk_is_prior = False
         self.blamechunk_revision = None
         self.blamechunk_time = None

     def __handle_blamechunk_content__(self, content):
         author = None
         (comments, self.is_inside_comment) = comment.handle_comment_block(self.is_inside_comment, self.extension, content)

         if self.blamechunk_is_prior and interval.get_since():
             return
         try:
             author = self.changes.get_latest_author_by_email(self.blamechunk_email)
         except KeyError:
             return

-        if not filtering.set_filtered(author, "author") and not \
-               filtering.set_filtered(self.blamechunk_email, "email") and not \
-               filtering.set_filtered(self.blamechunk_revision, "revision"):
+        if (
+            not filtering.set_filtered(author, "author")
+            and not filtering.set_filtered(self.blamechunk_email, "email")
+            and not filtering.set_filtered(self.blamechunk_revision, "revision")
+        ):
             __blame_lock__.acquire()  # Global lock used to protect calls from here...

-            if self.blames.get((author, self.filename), None) == None:
+            if self.blames.get((author, self.filename), None) is None:
                 self.blames[(author, self.filename)] = BlameEntry()

             self.blames[(author, self.filename)].comments += comments
             self.blames[(author, self.filename)].rows += 1

             if (self.blamechunk_time - self.changes.first_commit_date).days > 0:
-                self.blames[(author, self.filename)].skew += ((self.changes.last_commit_date - self.blamechunk_time).days /
-                                                              (7.0 if self.useweeks else AVG_DAYS_PER_MONTH))
+                self.blames[(author, self.filename)].skew += (self.changes.last_commit_date - self.blamechunk_time).days / (
+                    7.0 if self.useweeks else AVG_DAYS_PER_MONTH
+                )

             __blame_lock__.release()  # ...to here.

     def run(self):
         git_blame_r = subprocess.Popen(self.blame_command, stdout=subprocess.PIPE).stdout
         rows = git_blame_r.readlines()
         git_blame_r.close()

         self.__clear_blamechunk_info__()

-        #pylint: disable=W0201
+        # pylint: disable=W0201
         for j in range(0, len(rows)):
             row = rows[j].decode("utf-8", "replace").strip()
             keyval = row.split(" ", 2)

             if self.blamechunk_is_last:
                 self.__handle_blamechunk_content__(row)
                 self.__clear_blamechunk_info__()
             elif keyval[0] == "boundary":
                 self.blamechunk_is_prior = True
             elif keyval[0] == "author-mail":
                 self.blamechunk_email = keyval[1].lstrip("<").rstrip(">")
             elif keyval[0] == "author-time":
                 self.blamechunk_time = datetime.date.fromtimestamp(int(keyval[1]))
             elif keyval[0] == "filename":
                 self.blamechunk_is_last = True
             elif Blame.is_revision(keyval[0]):
                 self.blamechunk_revision = keyval[0]

         __thread_lock__.release()  # Lock controlling the number of threads running

 PROGRESS_TEXT = N_("Checking how many rows belong to each author (2 of 2): {0:.0f}%")

 class Blame(object):
     def __init__(self, repo, hard, useweeks, changes):
         self.blames = {}
-        ls_tree_p = subprocess.Popen(["git", "ls-tree", "--name-only", "-r", interval.get_ref()],
-                                     stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        ls_tree_p = subprocess.Popen(
+            ["git", "ls-tree", "--name-only", "-r", interval.get_ref()], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+        )
         lines = ls_tree_p.communicate()[0].splitlines()
         ls_tree_p.stdout.close()

         if ls_tree_p.returncode == 0:
             progress_text = _(PROGRESS_TEXT)

-            if repo != None:
+            if repo is not None:
                 progress_text = "[%s] " % repo.name + progress_text

             for i, row in enumerate(lines):
                 row = row.strip().decode("unicode_escape", "ignore")
                 row = row.encode("latin-1", "replace")
-                row = row.decode("utf-8", "replace").strip("\"").strip("'").strip()
+                row = row.decode("utf-8", "replace").strip('"').strip("'").strip()

-                if FileDiff.get_extension(row) in extensions.get_located() and \
-                   FileDiff.is_valid_extension(row) and not filtering.set_filtered(FileDiff.get_filename(row)):
-                    blame_command = [_f for _f in ["git", "blame", "--line-porcelain", "-w"] + \
-                                     (["-C", "-C", "-M"] if hard else []) +
-                                     [interval.get_since(), interval.get_ref(), "--", row] if _f]
-                    thread = BlameThread(useweeks, changes, blame_command, FileDiff.get_extension(row),
-                                         self.blames, row.strip())
+                if (
+                    FileDiff.get_extension(row) in extensions.get_located()
+                    and FileDiff.is_valid_extension(row)
+                    and not filtering.set_filtered(FileDiff.get_filename(row))
+                ):
+                    blame_command = [
+                        _f
+                        for _f in ["git", "blame", "--line-porcelain", "-w"]
+                        + (["-C", "-C", "-M"] if hard else [])
+                        + [interval.get_since(), interval.get_ref(), "--", row]
+                        if _f
+                    ]
+                    thread = BlameThread(
+                        useweeks, changes, blame_command, FileDiff.get_extension(row), self.blames, row.strip()
+                    )
                     thread.daemon = True
                     thread.start()

                 if format.is_interactive_format():
                     terminal.output_progress(progress_text, i, len(lines))

             # Make sure all threads have completed.
             for i in range(0, NUM_THREADS):
                 __thread_lock__.acquire()
             # We also have to release them for future use.
             for i in range(0, NUM_THREADS):
                 __thread_lock__.release()

     def __iadd__(self, other):
         try:
             self.blames.update(other.blames)
-            return self;
+            return self
         except AttributeError:
-            return other;
+            return other

     @staticmethod
     def is_revision(string):
         revision = re.search("([0-9a-f]{40})", string)

-        if revision == None:
+        if revision is None:
             return False

         return revision.group(1).strip()

     @staticmethod
     def get_stability(author, blamed_rows, changes):
         if author in changes.get_authorinfo_list():
             author_insertions = changes.get_authorinfo_list()[author].insertions
             return 100 if author_insertions == 0 else 100.0 * blamed_rows / author_insertions
         return 100

     @staticmethod
     def get_time(string):
         time = re.search(r" \(.*?(\d\d\d\d-\d\d-\d\d)", string)
         return time.group(1).strip()

     def get_summed_blames(self):
         summed_blames = {}
         for i in list(self.blames.items()):
-            if summed_blames.get(i[0][0], None) == None:
+            if summed_blames.get(i[0][0], None) is None:
                 summed_blames[i[0][0]] = BlameEntry()

             summed_blames[i[0][0]].rows += i[1].rows
             summed_blames[i[0][0]].skew += i[1].skew
             summed_blames[i[0][0]].comments += i[1].comments

         return summed_blames

View File

@@ -18,7 +18,6 @@
 # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.

 import bisect
 import datetime
 import multiprocessing
@@ -34,260 +33,291 @@ NUM_THREADS = multiprocessing.cpu_count()
 __thread_lock__ = threading.BoundedSemaphore(NUM_THREADS)
 __changes_lock__ = threading.Lock()

 class FileDiff(object):
     def __init__(self, string):
         commit_line = string.split("|")

         if commit_line.__len__() == 2:
             self.name = commit_line[0].strip()
             self.insertions = commit_line[1].count("+")
             self.deletions = commit_line[1].count("-")

     @staticmethod
     def is_filediff_line(string):
         string = string.split("|")
-        return string.__len__() == 2 and string[1].find("Bin") == -1 and ('+' in string[1] or '-' in string[1])
+        return string.__len__() == 2 and string[1].find("Bin") == -1 and ("+" in string[1] or "-" in string[1])

     @staticmethod
     def get_extension(string):
-        string = string.split("|")[0].strip().strip("{}").strip("\"").strip("'")
+        string = string.split("|")[0].strip().strip("{}").strip('"').strip("'")
         return os.path.splitext(string)[1][1:]

     @staticmethod
     def get_filename(string):
-        return string.split("|")[0].strip().strip("{}").strip("\"").strip("'")
+        return string.split("|")[0].strip().strip("{}").strip('"').strip("'")

     @staticmethod
     def is_valid_extension(string):
         extension = FileDiff.get_extension(string)
-        for i in extensions.get():
-            if (extension == "" and i == "*") or extension == i or i == '**':
-                return True
-        return False
+        for i in extensions.get():
+            if (extension == "" and i == "*") or extension == i or i == "**":
+                return True
+        return False

 class Commit(object):
     def __init__(self, string):
         self.filediffs = []
         commit_line = string.split("|")

         if commit_line.__len__() == 5:
             self.timestamp = commit_line[0]
             self.date = commit_line[1]
             self.sha = commit_line[2]
             self.author = commit_line[3].strip()
             self.email = commit_line[4].strip()

     def __lt__(self, other):
         return self.timestamp.__lt__(other.timestamp)  # only used for sorting; we just consider the timestamp.

     def add_filediff(self, filediff):
         self.filediffs.append(filediff)

     def get_filediffs(self):
         return self.filediffs

     @staticmethod
     def get_author_and_email(string):
         commit_line = string.split("|")

         if commit_line.__len__() == 5:
             return (commit_line[3].strip(), commit_line[4].strip())

     @staticmethod
     def is_commit_line(string):
         return string.split("|").__len__() == 5

 class AuthorInfo(object):
     email = None
     insertions = 0
     deletions = 0
     commits = 0

 class ChangesThread(threading.Thread):
     def __init__(self, hard, changes, first_hash, second_hash, offset):
         __thread_lock__.acquire()  # Lock controlling the number of threads running
         threading.Thread.__init__(self)

         self.hard = hard
         self.changes = changes
         self.first_hash = first_hash
         self.second_hash = second_hash
         self.offset = offset

     @staticmethod
     def create(hard, changes, first_hash, second_hash, offset):
         thread = ChangesThread(hard, changes, first_hash, second_hash, offset)
         thread.daemon = True
         thread.start()

     def run(self):
-        git_log_r = subprocess.Popen([_f for _f in ["git", "log", "--reverse", "--pretty=%ct|%cd|%H|%aN|%aE",
-                                      "--stat=100000,8192", "--no-merges", "-w", interval.get_since(),
-                                      interval.get_until(), "--date=short"] + (["-C", "-C", "-M"] if self.hard else []) +
-                                      [self.first_hash + self.second_hash] if _f], stdout=subprocess.PIPE).stdout
+        git_log_r = subprocess.Popen(
+            [
+                _f
+                for _f in [
+                    "git",
+                    "log",
+                    "--reverse",
+                    "--pretty=%ct|%cd|%H|%aN|%aE",
+                    "--stat=100000,8192",
+                    "--no-merges",
+                    "-w",
+                    interval.get_since(),
+                    interval.get_until(),
+                    "--date=short",
+                ]
+                + (["-C", "-C", "-M"] if self.hard else [])
+                + [self.first_hash + self.second_hash]
+                if _f
+            ],
+            stdout=subprocess.PIPE,
+        ).stdout
         lines = git_log_r.readlines()
         git_log_r.close()

         commit = None
         found_valid_extension = False
         is_filtered = False
         commits = []

         __changes_lock__.acquire()  # Global lock used to protect calls from here...

         for i in lines:
             j = i.strip().decode("unicode_escape", "ignore")
             j = j.encode("latin-1", "replace")
             j = j.decode("utf-8", "replace")

             if Commit.is_commit_line(j):
                 (author, email) = Commit.get_author_and_email(j)
                 self.changes.emails_by_author[author] = email
                 self.changes.authors_by_email[email] = author

             if Commit.is_commit_line(j) or i is lines[-1]:
                 if found_valid_extension:
                     bisect.insort(commits, commit)

                 found_valid_extension = False
                 is_filtered = False
                 commit = Commit(j)

-                if Commit.is_commit_line(j) and \
-                   (filtering.set_filtered(commit.author, "author") or \
-                    filtering.set_filtered(commit.email, "email") or \
-                    filtering.set_filtered(commit.sha, "revision") or \
-                    filtering.set_filtered(commit.sha, "message")):
-                    is_filtered = True
+                if Commit.is_commit_line(j) and (
+                    filtering.set_filtered(commit.author, "author")
+                    or filtering.set_filtered(commit.email, "email")
+                    or filtering.set_filtered(commit.sha, "revision")
+                    or filtering.set_filtered(commit.sha, "message")
+                ):
+                    is_filtered = True

-            if FileDiff.is_filediff_line(j) and not \
-               filtering.set_filtered(FileDiff.get_filename(j)) and not is_filtered:
+            if FileDiff.is_filediff_line(j) and not filtering.set_filtered(FileDiff.get_filename(j)) and not is_filtered:
                 extensions.add_located(FileDiff.get_extension(j))

                 if FileDiff.is_valid_extension(j):
                     found_valid_extension = True
                     filediff = FileDiff(j)
                     commit.add_filediff(filediff)

         self.changes.commits[self.offset // CHANGES_PER_THREAD] = commits
         __changes_lock__.release()  # ...to here.
         __thread_lock__.release()  # Lock controlling the number of threads running

 PROGRESS_TEXT = N_("Fetching and calculating primary statistics (1 of 2): {0:.0f}%")

 class Changes(object):
     authors = {}
     authors_dateinfo = {}
     authors_by_email = {}
     emails_by_author = {}

     def __init__(self, repo, hard):
         self.commits = []
-        interval.set_ref("HEAD");
-        git_rev_list_p = subprocess.Popen([_f for _f in ["git", "rev-list", "--reverse", "--no-merges",
-                                           interval.get_since(), interval.get_until(), "HEAD"] if _f],
-                                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        interval.set_ref("HEAD")
+        git_rev_list_p = subprocess.Popen(
+            [
+                _f
+                for _f in ["git", "rev-list", "--reverse", "--no-merges", interval.get_since(), interval.get_until(), "HEAD"]
+                if _f
+            ],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+        )
         lines = git_rev_list_p.communicate()[0].splitlines()
         git_rev_list_p.stdout.close()

         if git_rev_list_p.returncode == 0 and len(lines) > 0:
             progress_text = _(PROGRESS_TEXT)

-            if repo != None:
+            if repo is not None:
                 progress_text = "[%s] " % repo.name + progress_text

             chunks = len(lines) // CHANGES_PER_THREAD
             self.commits = [None] * (chunks if len(lines) % CHANGES_PER_THREAD == 0 else chunks + 1)
             first_hash = ""

             for i, entry in enumerate(lines):
                 if i % CHANGES_PER_THREAD == CHANGES_PER_THREAD - 1:
                     entry = entry.decode("utf-8", "replace").strip()
                     second_hash = entry
                     ChangesThread.create(hard, self, first_hash, second_hash, i)
                     first_hash = entry + ".."

                 if format.is_interactive_format():
                     terminal.output_progress(progress_text, i, len(lines))
             else:
                 if CHANGES_PER_THREAD - 1 != i % CHANGES_PER_THREAD:
                     entry = entry.decode("utf-8", "replace").strip()
                     second_hash = entry
                     ChangesThread.create(hard, self, first_hash, second_hash, i)

             # Make sure all threads have completed.
             for i in range(0, NUM_THREADS):
                 __thread_lock__.acquire()
             # We also have to release them for future use.
             for i in range(0, NUM_THREADS):
                 __thread_lock__.release()

             self.commits = [item for sublist in self.commits for item in sublist]

         if len(self.commits) > 0:
             if interval.has_interval():
                 interval.set_ref(self.commits[-1].sha)

-            self.first_commit_date = datetime.date(int(self.commits[0].date[0:4]), int(self.commits[0].date[5:7]),
-                                                   int(self.commits[0].date[8:10]))
-            self.last_commit_date = datetime.date(int(self.commits[-1].date[0:4]), int(self.commits[-1].date[5:7]),
-                                                  int(self.commits[-1].date[8:10]))
+            self.first_commit_date = datetime.date(
+                int(self.commits[0].date[0:4]), int(self.commits[0].date[5:7]), int(self.commits[0].date[8:10])
+            )
+            self.last_commit_date = datetime.date(
+                int(self.commits[-1].date[0:4]), int(self.commits[-1].date[5:7]), int(self.commits[-1].date[8:10])
+            )

     def __iadd__(self, other):
         try:
             self.authors.update(other.authors)
             self.authors_dateinfo.update(other.authors_dateinfo)
             self.authors_by_email.update(other.authors_by_email)
             self.emails_by_author.update(other.emails_by_author)

             for commit in other.commits:
                 bisect.insort(self.commits, commit)
             if not self.commits and not other.commits:
                 self.commits = []

             return self
         except AttributeError:
             return other

     def get_commits(self):
         return self.commits

     @staticmethod
     def modify_authorinfo(authors, key, commit):
-        if authors.get(key, None) == None:
+        if authors.get(key, None) is None:
             authors[key] = AuthorInfo()

         if commit.get_filediffs():
             authors[key].commits += 1

         for j in commit.get_filediffs():
             authors[key].insertions += j.insertions
             authors[key].deletions += j.deletions

     def get_authorinfo_list(self):
         if not self.authors:
             for i in self.commits:
                 Changes.modify_authorinfo(self.authors, i.author, i)

         return self.authors

     def get_authordateinfo_list(self):
         if not self.authors_dateinfo:
             for i in self.commits:
                 Changes.modify_authorinfo(self.authors_dateinfo, (i.date, i.author), i)

         return self.authors_dateinfo

     def get_latest_author_by_email(self, name):
         if not hasattr(name, "decode"):
             name = str.encode(name)
         try:
             name = name.decode("unicode_escape", "ignore")
         except UnicodeEncodeError:
             pass

         return self.authors_by_email[name]

     def get_latest_email_by_author(self, name):
         return self.emails_by_author[name]
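For reference, Commit and FileDiff both parse the pipe-delimited rows produced by the --pretty=%ct|%cd|%H|%aN|%aE / --stat log format used above. A small illustration; the sample rows and the e-mail address are fabricated:

# Illustrative only: fabricated git-log rows in the format parsed by Commit and FileDiff.
from gitinspector.changes import Commit, FileDiff

row = "1614476348|2021-02-27|c17c5795ad|JP White|jpwhite3@example.org"
assert Commit.is_commit_line(row)
print(Commit.get_author_and_email(row))   # ('JP White', 'jpwhite3@example.org')

stat_row = " gitinspector/changes.py | 12 ++++++------"
assert FileDiff.is_filediff_line(stat_row)
print(FileDiff.get_extension(stat_row))   # 'py'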

View File

@@ -25,34 +25,41 @@ import sys
 import tempfile

 try:
     from urllib.parse import urlparse
 except:
     from urllib.parse import urlparse

 __cloned_paths__ = []

 def create(url):
     class Repository(object):
         def __init__(self, name, location):
             self.name = name
             self.location = location

     parsed_url = urlparse(url)

-    if parsed_url.scheme == "file" or parsed_url.scheme == "git" or parsed_url.scheme == "http" or \
-       parsed_url.scheme == "https" or parsed_url.scheme == "ssh":
-        path = tempfile.mkdtemp(suffix=".gitinspector")
-        git_clone = subprocess.Popen(["git", "clone", url, path], stdout=sys.stderr)
-        git_clone.wait()
+    if (
+        parsed_url.scheme == "file"
+        or parsed_url.scheme == "git"
+        or parsed_url.scheme == "http"
+        or parsed_url.scheme == "https"
+        or parsed_url.scheme == "ssh"
+    ):
+        path = tempfile.mkdtemp(suffix=".gitinspector")
+        git_clone = subprocess.Popen(["git", "clone", url, path], stdout=sys.stderr)
+        git_clone.wait()

         if git_clone.returncode != 0:
             sys.exit(git_clone.returncode)

         __cloned_paths__.append(path)
         return Repository(os.path.basename(parsed_url.path), path)

     return Repository(None, os.path.abspath(url))

 def delete():
     for path in __cloned_paths__:
         shutil.rmtree(path, ignore_errors=True)

View File

@@ -18,61 +18,139 @@
 # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.

-__comment_begining__ = {"java": "/*", "c": "/*", "cc": "/*", "cpp": "/*", "cs": "/*", "h": "/*", "hh": "/*", "hpp": "/*",
-                        "hs": "{-", "html": "<!--", "php": "/*", "py": "\"\"\"", "glsl": "/*", "rb": "=begin", "js": "/*",
-                        "jspx": "<!--", "scala": "/*", "sql": "/*", "tex": "\\begin{comment}", "xhtml": "<!--",
-                        "xml": "<!--", "ml": "(*", "mli": "(*", "go": "/*", "ly": "%{", "ily": "%{"}
+__comment_begining__ = {
+    "java": "/*",
+    "c": "/*",
+    "cc": "/*",
+    "cpp": "/*",
+    "cs": "/*",
+    "h": "/*",
+    "hh": "/*",
+    "hpp": "/*",
+    "hs": "{-",
+    "html": "<!--",
+    "php": "/*",
+    "py": '"""',
+    "glsl": "/*",
+    "rb": "=begin",
+    "js": "/*",
+    "jspx": "<!--",
+    "scala": "/*",
+    "sql": "/*",
+    "tex": "\\begin{comment}",
+    "xhtml": "<!--",
+    "xml": "<!--",
+    "ml": "(*",
+    "mli": "(*",
+    "go": "/*",
+    "ly": "%{",
+    "ily": "%{",
+}

-__comment_end__ = {"java": "*/", "c": "*/", "cc": "*/", "cpp": "*/", "cs": "*/", "h": "*/", "hh": "*/", "hpp": "*/", "hs": "-}",
-                   "html": "-->", "php": "*/", "py": "\"\"\"", "glsl": "*/", "rb": "=end", "js": "*/", "jspx": "-->",
-                   "scala": "*/", "sql": "*/", "tex": "\\end{comment}", "xhtml": "-->", "xml": "-->", "ml": "*)", "mli": "*)",
-                   "go": "*/", "ly": "%}", "ily": "%}"}
+__comment_end__ = {
+    "java": "*/",
+    "c": "*/",
+    "cc": "*/",
+    "cpp": "*/",
+    "cs": "*/",
+    "h": "*/",
+    "hh": "*/",
+    "hpp": "*/",
+    "hs": "-}",
+    "html": "-->",
+    "php": "*/",
+    "py": '"""',
+    "glsl": "*/",
+    "rb": "=end",
+    "js": "*/",
+    "jspx": "-->",
+    "scala": "*/",
+    "sql": "*/",
+    "tex": "\\end{comment}",
+    "xhtml": "-->",
+    "xml": "-->",
+    "ml": "*)",
+    "mli": "*)",
+    "go": "*/",
+    "ly": "%}",
+    "ily": "%}",
+}

-__comment__ = {"java": "//", "c": "//", "cc": "//", "cpp": "//", "cs": "//", "h": "//", "hh": "//", "hpp": "//", "hs": "--",
-               "pl": "#", "php": "//", "py": "#", "glsl": "//", "rb": "#", "robot": "#", "rs": "//", "rlib": "//", "js": "//",
-               "scala": "//", "sql": "--", "tex": "%", "ada": "--", "ads": "--", "adb": "--", "pot": "#", "po": "#", "go": "//",
-               "ly": "%", "ily": "%"}
+__comment__ = {
+    "java": "//",
+    "c": "//",
+    "cc": "//",
+    "cpp": "//",
+    "cs": "//",
+    "h": "//",
+    "hh": "//",
+    "hpp": "//",
+    "hs": "--",
+    "pl": "#",
+    "php": "//",
+    "py": "#",
+    "glsl": "//",
+    "rb": "#",
+    "robot": "#",
+    "rs": "//",
+    "rlib": "//",
+    "js": "//",
+    "scala": "//",
+    "sql": "--",
+    "tex": "%",
+    "ada": "--",
+    "ads": "--",
+    "adb": "--",
+    "pot": "#",
+    "po": "#",
+    "go": "//",
+    "ly": "%",
+    "ily": "%",
+}

 __comment_markers_must_be_at_begining__ = {"tex": True}

 def __has_comment_begining__(extension, string):
-    if __comment_markers_must_be_at_begining__.get(extension, None) == True:
+    if __comment_markers_must_be_at_begining__.get(extension, None):
         return string.find(__comment_begining__[extension]) == 0
-    elif __comment_begining__.get(extension, None) != None and string.find(__comment_end__[extension], 2) == -1:
+    elif __comment_begining__.get(extension, None) is not None and string.find(__comment_end__[extension], 2) == -1:
         return string.find(__comment_begining__[extension]) != -1

     return False

 def __has_comment_end__(extension, string):
-    if __comment_markers_must_be_at_begining__.get(extension, None) == True:
+    if __comment_markers_must_be_at_begining__.get(extension, None):
         return string.find(__comment_end__[extension]) == 0
-    elif __comment_end__.get(extension, None) != None:
+    elif __comment_end__.get(extension, None) is not None:
         return string.find(__comment_end__[extension]) != -1

     return False

 def is_comment(extension, string):
-    if __comment_begining__.get(extension, None) != None and string.strip().startswith(__comment_begining__[extension]):
+    if __comment_begining__.get(extension, None) is not None and string.strip().startswith(__comment_begining__[extension]):
         return True
-    if __comment_end__.get(extension, None) != None and string.strip().endswith(__comment_end__[extension]):
+    if __comment_end__.get(extension, None) is not None and string.strip().endswith(__comment_end__[extension]):
         return True
-    if __comment__.get(extension, None) != None and string.strip().startswith(__comment__[extension]):
+    if __comment__.get(extension, None) is not None and string.strip().startswith(__comment__[extension]):
         return True

     return False

 def handle_comment_block(is_inside_comment, extension, content):
     comments = 0

     if is_comment(extension, content):
         comments += 1
     if is_inside_comment:
         if __has_comment_end__(extension, content):
             is_inside_comment = False
         else:
             comments += 1
     elif __has_comment_begining__(extension, content) and not __has_comment_end__(extension, content):
         is_inside_comment = True

     return (comments, is_inside_comment)
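A small sketch of how the comment tables above are consumed; the sample source lines are invented:

# Illustrative only: feeding a couple of fabricated lines through the helpers above.
from gitinspector.comment import handle_comment_block, is_comment

print(is_comment("py", "# a single-line comment"))  # True, "#" is the single-line marker for "py"

inside = False
for line in ['"""', "docstring body", '"""', "x = 1"]:
    (comments, inside) = handle_comment_block(inside, "py", line)
    print(comments, inside)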

View File

@@ -22,72 +22,75 @@ import os
 import subprocess

 from . import extensions, filtering, format, interval, optval

 class GitConfig(object):
     def __init__(self, run, repo, global_only=False):
         self.run = run
         self.repo = repo
         self.global_only = global_only

     def __read_git_config__(self, variable):
         previous_directory = os.getcwd()
         os.chdir(self.repo)
-        setting = subprocess.Popen([_f for _f in ["git", "config", "--global" if self.global_only else "",
-                                    "inspector." + variable] if _f], stdout=subprocess.PIPE).stdout
+        setting = subprocess.Popen(
+            [_f for _f in ["git", "config", "--global" if self.global_only else "", "inspector." + variable] if _f],
+            stdout=subprocess.PIPE,
+        ).stdout
         os.chdir(previous_directory)

         try:
             setting = setting.readlines()[0]
             setting = setting.decode("utf-8", "replace").strip()
         except IndexError:
             setting = ""

         return setting

     def __read_git_config_bool__(self, variable):
         try:
             variable = self.__read_git_config__(variable)
             return optval.get_boolean_argument(False if variable == "" else variable)
         except optval.InvalidOptionArgument:
             return False

     def __read_git_config_string__(self, variable):
         string = self.__read_git_config__(variable)
         return (True, string) if len(string) > 0 else (False, None)

     def read(self):
         var = self.__read_git_config_string__("file-types")
         if var[0]:
             extensions.define(var[1])

         var = self.__read_git_config_string__("exclude")
         if var[0]:
             filtering.add(var[1])

         var = self.__read_git_config_string__("format")
         if var[0] and not format.select(var[1]):
             raise format.InvalidFormatError(_("specified output format not supported."))

         self.run.hard = self.__read_git_config_bool__("hard")
         self.run.list_file_types = self.__read_git_config_bool__("list-file-types")
         self.run.localize_output = self.__read_git_config_bool__("localize-output")
         self.run.metrics = self.__read_git_config_bool__("metrics")
         self.run.responsibilities = self.__read_git_config_bool__("responsibilities")
         self.run.useweeks = self.__read_git_config_bool__("weeks")

         var = self.__read_git_config_string__("since")
         if var[0]:
             interval.set_since(var[1])

         var = self.__read_git_config_string__("until")
         if var[0]:
             interval.set_until(var[1])

         self.run.timeline = self.__read_git_config_bool__("timeline")

         if self.__read_git_config_bool__("grading"):
             self.run.hard = True
             self.run.list_file_types = True
             self.run.metrics = True
             self.run.responsibilities = True
             self.run.timeline = True
             self.run.useweeks = True

View File

@@ -18,24 +18,27 @@
 # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.

 DEFAULT_EXTENSIONS = ["java", "c", "cc", "cpp", "h", "hh", "hpp", "py", "glsl", "rb", "js", "sql"]

 __extensions__ = DEFAULT_EXTENSIONS
 __located_extensions__ = set()

 def get():
     return __extensions__

 def define(string):
     global __extensions__
     __extensions__ = string.split(",")

 def add_located(string):
     if len(string) == 0:
         __located_extensions__.add("*")
     else:
         __located_extensions__.add(string)

 def get_located():
     return __located_extensions__
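Usage of the extension registry above, for reference; the values passed in are just examples:

# Illustrative only: exercising define() / add_located() / get_located().
from gitinspector import extensions

extensions.define("py,robot,**")  # comma-separated list, as read from --file-types or git config
extensions.add_located("py")
extensions.add_located("")        # files without an extension are recorded as "*"
print(extensions.get())           # ['py', 'robot', '**']
print(extensions.get_located())   # e.g. {'py', '*'} (a set; order varies)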

View File

@@ -21,69 +21,84 @@
 import re
 import subprocess

-__filters__ = {"file": [set(), set()], "author": [set(), set()], "email": [set(), set()], "revision": [set(), set()],
-               "message" : [set(), None]}
+__filters__ = {
+    "file": [set(), set()],
+    "author": [set(), set()],
+    "email": [set(), set()],
+    "revision": [set(), set()],
+    "message": [set(), None],
+}

 class InvalidRegExpError(ValueError):
     def __init__(self, msg):
         super(InvalidRegExpError, self).__init__(msg)
         self.msg = msg

 def get():
     return __filters__

 def __add_one__(string):
     for i in __filters__:
         if (i + ":").lower() == string[0:len(i) + 1].lower():
             __filters__[i][0].add(string[len(i) + 1:])
             return
     __filters__["file"][0].add(string)

 def add(string):
     rules = string.split(",")
     for rule in rules:
         __add_one__(rule)

 def clear():
     for i in __filters__:
         __filters__[i][0] = set()

 def get_filered(filter_type="file"):
     return __filters__[filter_type][1]

 def has_filtered():
     for i in __filters__:
         if __filters__[i][1]:
             return True
     return False

 def __find_commit_message__(sha):
-    git_show_r = subprocess.Popen([_f for _f in ["git", "show", "-s", "--pretty=%B", "-w", sha] if _f],
-                                  stdout=subprocess.PIPE).stdout
+    git_show_r = subprocess.Popen(
+        [_f for _f in ["git", "show", "-s", "--pretty=%B", "-w", sha] if _f], stdout=subprocess.PIPE
+    ).stdout
     commit_message = git_show_r.read()
     git_show_r.close()

     commit_message = commit_message.strip().decode("unicode_escape", "ignore")
     commit_message = commit_message.encode("latin-1", "replace")
     return commit_message.decode("utf-8", "replace")

 def set_filtered(string, filter_type="file"):
     string = string.strip()

     if len(string) > 0:
         for i in __filters__[filter_type][0]:
             search_for = string

             if filter_type == "message":
                 search_for = __find_commit_message__(string)
             try:
-                if re.search(i, search_for) != None:
+                if re.search(i, search_for) is not None:
                     if filter_type == "message":
                         __add_one__("revision:" + string)
                     else:
                         __filters__[filter_type][1].add(string)
                     return True
             except:
                 raise InvalidRegExpError(_("invalid regular expression specified"))
     return False
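The filter syntax accepted by add() is "<type>:<regex>", with a bare pattern meaning a file filter; a short sketch (the patterns are made-up examples):

# Illustrative only: example exclude patterns in the form handled by __add_one__() above.
from gitinspector import filtering

filtering.add("author:^Dependabot,email:.*@example\\.org,docs/")
print(filtering.get()["author"][0])              # {'^Dependabot'}
print(filtering.get()["file"][0])                # {'docs/'}
print(filtering.set_filtered("docs/readme.md"))  # True, and the match is remembered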

View File

@@ -18,7 +18,6 @@
 # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.

 import base64
 import os
 import textwrap
@@ -33,122 +32,142 @@ DEFAULT_FORMAT = __available_formats__[3]
 __selected_format__ = DEFAULT_FORMAT

 class InvalidFormatError(Exception):
     def __init__(self, msg):
         super(InvalidFormatError, self).__init__(msg)
         self.msg = msg

 def select(format):
     global __selected_format__
     __selected_format__ = format

     return format in __available_formats__

 def get_selected():
     return __selected_format__

 def is_interactive_format():
     return __selected_format__ == "text"

 def __output_html_template__(name):
     template_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), name)
     file_r = open(template_path, "rb")
     template = file_r.read().decode("utf-8", "replace")

     file_r.close()
     return template

 def __get_zip_file_content__(name, file_name="/html/flot.zip"):
     zip_file = zipfile.ZipFile(basedir.get_basedir() + file_name, "r")
     content = zip_file.read(name)

     zip_file.close()
     return content.decode("utf-8", "replace")

 INFO_ONE_REPOSITORY = N_("Statistical information for the repository '{0}' was gathered on {1}.")
 INFO_MANY_REPOSITORIES = N_("Statistical information for the repositories '{0}' was gathered on {1}.")

 def output_header(repos):
     repos_string = ", ".join([repo.name for repo in repos])

     if __selected_format__ == "html" or __selected_format__ == "htmlembedded":
         base = basedir.get_basedir()
         html_header = __output_html_template__(base + "/html/html.header")
-        tablesorter_js = __get_zip_file_content__("jquery.tablesorter.min.js",
-                                                  "/html/jquery.tablesorter.min.js.zip").encode("latin-1", "replace")
+        tablesorter_js = __get_zip_file_content__("jquery.tablesorter.min.js", "/html/jquery.tablesorter.min.js.zip").encode(
+            "latin-1", "replace"
+        )
         tablesorter_js = tablesorter_js.decode("utf-8", "ignore")
         flot_js = __get_zip_file_content__("jquery.flot.js")
         pie_js = __get_zip_file_content__("jquery.flot.pie.js")
         resize_js = __get_zip_file_content__("jquery.flot.resize.js")

         logo_file = open(base + "/html/gitinspector_piclet.png", "rb")
         logo = logo_file.read()
         logo_file.close()
         logo = base64.b64encode(logo)

         if __selected_format__ == "htmlembedded":
             jquery_js = ">" + __get_zip_file_content__("jquery.js")
         else:
-            jquery_js = " src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js\">"
+            jquery_js = ' src="https://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js">'

-        print(html_header.format(title=_("Repository statistics for '{0}'").format(repos_string),
-                                 jquery=jquery_js,
-                                 jquery_tablesorter=tablesorter_js,
-                                 jquery_flot=flot_js,
-                                 jquery_flot_pie=pie_js,
-                                 jquery_flot_resize=resize_js,
-                                 logo=logo.decode("utf-8", "replace"),
-                                 logo_text=_("The output has been generated by {0} {1}. The statistical analysis tool"
-                                             " for git repositories.").format(
-                                             "<a href=\"https://github.com/ejwa/gitinspector\">gitinspector</a>",
-                                             version.__version__),
-                                 repo_text=_(INFO_ONE_REPOSITORY if len(repos) <= 1 else INFO_MANY_REPOSITORIES).format(
-                                             repos_string, localization.get_date()),
-                                 show_minor_authors=_("Show minor authors"),
-                                 hide_minor_authors=_("Hide minor authors"),
-                                 show_minor_rows=_("Show rows with minor work"),
-                                 hide_minor_rows=_("Hide rows with minor work")))
+        print(
+            html_header.format(
+                title=_("Repository statistics for '{0}'").format(repos_string),
+                jquery=jquery_js,
+                jquery_tablesorter=tablesorter_js,
+                jquery_flot=flot_js,
+                jquery_flot_pie=pie_js,
+                jquery_flot_resize=resize_js,
+                logo=logo.decode("utf-8", "replace"),
+                logo_text=_(
+                    "The output has been generated by {0} {1}. The statistical analysis tool" " for git repositories."
+                ).format('<a href="https://github.com/ejwa/gitinspector">gitinspector</a>', version.__version__),
+                repo_text=_(INFO_ONE_REPOSITORY if len(repos) <= 1 else INFO_MANY_REPOSITORIES).format(
+                    repos_string, localization.get_date()
+                ),
+                show_minor_authors=_("Show minor authors"),
+                hide_minor_authors=_("Hide minor authors"),
+                show_minor_rows=_("Show rows with minor work"),
+                hide_minor_rows=_("Hide rows with minor work"),
+            )
+        )
     elif __selected_format__ == "json":
-        print("{\n\t\"gitinspector\": {")
-        print("\t\t\"version\": \"" + version.__version__ + "\",")
+        print('{\n\t"gitinspector": {')
+        print('\t\t"version": "' + version.__version__ + '",')

         if len(repos) <= 1:
-            print("\t\t\"repository\": \"" + repos_string + "\",")
+            print('\t\t"repository": "' + repos_string + '",')
         else:
-            repos_json = "\t\t\"repositories\": [ "
+            repos_json = '\t\t"repositories": [ '

             for repo in repos:
-                repos_json += "\"" + repo.name + "\", "
+                repos_json += '"' + repo.name + '", '

             print(repos_json[:-2] + " ],")

-        print("\t\t\"report_date\": \"" + time.strftime("%Y/%m/%d") + "\",")
+        print('\t\t"report_date": "' + time.strftime("%Y/%m/%d") + '",')
     elif __selected_format__ == "xml":
         print("<gitinspector>")
         print("\t<version>" + version.__version__ + "</version>")

         if len(repos) <= 1:
             print("\t<repository>" + repos_string + "</repository>")
         else:
             print("\t<repositories>")

             for repo in repos:
                 print("\t\t<repository>" + repo.name + "</repository>")

             print("\t</repositories>")

         print("\t<report-date>" + time.strftime("%Y/%m/%d") + "</report-date>")
     else:
-        print(textwrap.fill(_(INFO_ONE_REPOSITORY if len(repos) <= 1 else INFO_MANY_REPOSITORIES).format(
-            repos_string, localization.get_date()), width=terminal.get_size()[0]))
+        print(
+            textwrap.fill(
+                _(INFO_ONE_REPOSITORY if len(repos) <= 1 else INFO_MANY_REPOSITORIES).format(
+                    repos_string, localization.get_date()
+                ),
+                width=terminal.get_size()[0],
+            )
+        )

 def output_footer():
     if __selected_format__ == "html" or __selected_format__ == "htmlembedded":
         base = basedir.get_basedir()
         html_footer = __output_html_template__(base + "/html/html.footer")
         print(html_footer)
     elif __selected_format__ == "json":
         print("\n\t}\n}")
     elif __selected_format__ == "xml":
         print("</gitinspector>")

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import atexit import atexit
import getopt import getopt
import os import os
@ -27,8 +26,7 @@ from .blame import Blame
from .changes import Changes from .changes import Changes
from .config import GitConfig from .config import GitConfig
from .metrics import MetricsLogic from .metrics import MetricsLogic
from . import (basedir, clone, extensions, filtering, format, help, interval, from . import basedir, clone, extensions, filtering, format, help, interval, localization, optval, terminal, version
localization, optval, terminal, version)
from .output import outputable from .output import outputable
from .output.blameoutput import BlameOutput from .output.blameoutput import BlameOutput
from .output.changesoutput import ChangesOutput from .output.changesoutput import ChangesOutput
@ -40,179 +38,202 @@ from .output.timelineoutput import TimelineOutput
localization.init() localization.init()
class Runner(object): class Runner(object):
def __init__(self): def __init__(self):
self.hard = False self.hard = False
self.include_metrics = False self.include_metrics = False
self.list_file_types = False self.list_file_types = False
self.localize_output = False self.localize_output = False
self.responsibilities = False self.responsibilities = False
self.grading = False self.grading = False
self.timeline = False self.timeline = False
self.useweeks = False self.useweeks = False
def process(self, repos): def process(self, repos):
localization.check_compatibility(version.__version__) localization.check_compatibility(version.__version__)
if not self.localize_output: if not self.localize_output:
localization.disable() localization.disable()
terminal.skip_escapes(not sys.stdout.isatty()) terminal.skip_escapes(not sys.stdout.isatty())
terminal.set_stdout_encoding() terminal.set_stdout_encoding()
previous_directory = os.getcwd() previous_directory = os.getcwd()
summed_blames = Blame.__new__(Blame) summed_blames = Blame.__new__(Blame)
summed_changes = Changes.__new__(Changes) summed_changes = Changes.__new__(Changes)
summed_metrics = MetricsLogic.__new__(MetricsLogic) summed_metrics = MetricsLogic.__new__(MetricsLogic)
for repo in repos: for repo in repos:
os.chdir(repo.location) os.chdir(repo.location)
repo = repo if len(repos) > 1 else None repo = repo if len(repos) > 1 else None
changes = Changes(repo, self.hard) changes = Changes(repo, self.hard)
summed_blames += Blame(repo, self.hard, self.useweeks, changes) summed_blames += Blame(repo, self.hard, self.useweeks, changes)
summed_changes += changes summed_changes += changes
if self.include_metrics: if self.include_metrics:
summed_metrics += MetricsLogic() summed_metrics += MetricsLogic()
if sys.stdout.isatty() and format.is_interactive_format(): if sys.stdout.isatty() and format.is_interactive_format():
terminal.clear_row() terminal.clear_row()
else: else:
os.chdir(previous_directory) os.chdir(previous_directory)
format.output_header(repos) format.output_header(repos)
outputable.output(ChangesOutput(summed_changes)) outputable.output(ChangesOutput(summed_changes))
if summed_changes.get_commits(): if summed_changes.get_commits():
outputable.output(BlameOutput(summed_changes, summed_blames)) outputable.output(BlameOutput(summed_changes, summed_blames))
if self.timeline: if self.timeline:
outputable.output(TimelineOutput(summed_changes, self.useweeks)) outputable.output(TimelineOutput(summed_changes, self.useweeks))
if self.include_metrics: if self.include_metrics:
outputable.output(MetricsOutput(summed_metrics)) outputable.output(MetricsOutput(summed_metrics))
if self.responsibilities: if self.responsibilities:
outputable.output(ResponsibilitiesOutput(summed_changes, summed_blames)) outputable.output(ResponsibilitiesOutput(summed_changes, summed_blames))
outputable.output(FilteringOutput()) outputable.output(FilteringOutput())
if self.list_file_types: if self.list_file_types:
outputable.output(ExtensionsOutput()) outputable.output(ExtensionsOutput())
format.output_footer() format.output_footer()
os.chdir(previous_directory) os.chdir(previous_directory)
def __check_python_version__(): def __check_python_version__():
if sys.version_info < (2, 6): if sys.version_info < (2, 6):
python_version = str(sys.version_info[0]) + "." + str(sys.version_info[1]) python_version = str(sys.version_info[0]) + "." + str(sys.version_info[1])
sys.exit(_("gitinspector requires at least Python 2.6 to run (version {0} was found).").format(python_version)) sys.exit(_("gitinspector requires at least Python 2.6 to run (version {0} was found).").format(python_version))
def __get_validated_git_repos__(repos_relative): def __get_validated_git_repos__(repos_relative):
if not repos_relative: if not repos_relative:
repos_relative = "." repos_relative = "."
repos = [] repos = []
#Try to clone the repos or return the same directory and bail out. # Try to clone the repos or return the same directory and bail out.
for repo in repos_relative: for repo in repos_relative:
cloned_repo = clone.create(repo) cloned_repo = clone.create(repo)
if cloned_repo.name == None: if cloned_repo.name is None:
cloned_repo.location = basedir.get_basedir_git(cloned_repo.location) cloned_repo.location = basedir.get_basedir_git(cloned_repo.location)
cloned_repo.name = os.path.basename(cloned_repo.location) cloned_repo.name = os.path.basename(cloned_repo.location)
repos.append(cloned_repo) repos.append(cloned_repo)
return repos return repos
def main(): def main():
terminal.check_terminal_encoding() terminal.check_terminal_encoding()
terminal.set_stdin_encoding() terminal.set_stdin_encoding()
argv = terminal.convert_command_line_to_utf8() argv = terminal.convert_command_line_to_utf8()
run = Runner() run = Runner()
repos = [] repos = []
try: try:
opts, args = optval.gnu_getopt(argv[1:], "f:F:hHlLmrTwx:", ["exclude=", "file-types=", "format=", opts, args = optval.gnu_getopt(
"hard:true", "help", "list-file-types:true", "localize-output:true", argv[1:],
"metrics:true", "responsibilities:true", "since=", "grading:true", "f:F:hHlLmrTwx:",
"timeline:true", "until=", "version", "weeks:true"]) [
repos = __get_validated_git_repos__(set(args)) "exclude=",
"file-types=",
"format=",
"hard:true",
"help",
"list-file-types:true",
"localize-output:true",
"metrics:true",
"responsibilities:true",
"since=",
"grading:true",
"timeline:true",
"until=",
"version",
"weeks:true",
],
)
repos = __get_validated_git_repos__(set(args))
#We need the repos above to be set before we read the git config. # We need the repos above to be set before we read the git config.
GitConfig(run, repos[-1].location).read() GitConfig(run, repos[-1].location).read()
clear_x_on_next_pass = True clear_x_on_next_pass = True
for o, a in opts: for o, a in opts:
if o in("-h", "--help"): if o in ("-h", "--help"):
help.output() help.output()
sys.exit(0) sys.exit(0)
elif o in("-f", "--file-types"): elif o in ("-f", "--file-types"):
extensions.define(a) extensions.define(a)
elif o in("-F", "--format"): elif o in ("-F", "--format"):
if not format.select(a): if not format.select(a):
raise format.InvalidFormatError(_("specified output format not supported.")) raise format.InvalidFormatError(_("specified output format not supported."))
elif o == "-H": elif o == "-H":
run.hard = True run.hard = True
elif o == "--hard": elif o == "--hard":
run.hard = optval.get_boolean_argument(a) run.hard = optval.get_boolean_argument(a)
elif o == "-l": elif o == "-l":
run.list_file_types = True run.list_file_types = True
elif o == "--list-file-types": elif o == "--list-file-types":
run.list_file_types = optval.get_boolean_argument(a) run.list_file_types = optval.get_boolean_argument(a)
elif o == "-L": elif o == "-L":
run.localize_output = True run.localize_output = True
elif o == "--localize-output": elif o == "--localize-output":
run.localize_output = optval.get_boolean_argument(a) run.localize_output = optval.get_boolean_argument(a)
elif o == "-m": elif o == "-m":
run.include_metrics = True run.include_metrics = True
elif o == "--metrics": elif o == "--metrics":
run.include_metrics = optval.get_boolean_argument(a) run.include_metrics = optval.get_boolean_argument(a)
elif o == "-r": elif o == "-r":
run.responsibilities = True run.responsibilities = True
elif o == "--responsibilities": elif o == "--responsibilities":
run.responsibilities = optval.get_boolean_argument(a) run.responsibilities = optval.get_boolean_argument(a)
elif o == "--since": elif o == "--since":
interval.set_since(a) interval.set_since(a)
elif o == "--version": elif o == "--version":
version.output() version.output()
sys.exit(0) sys.exit(0)
elif o == "--grading": elif o == "--grading":
grading = optval.get_boolean_argument(a) grading = optval.get_boolean_argument(a)
run.include_metrics = grading run.include_metrics = grading
run.list_file_types = grading run.list_file_types = grading
run.responsibilities = grading run.responsibilities = grading
run.grading = grading run.grading = grading
run.hard = grading run.hard = grading
run.timeline = grading run.timeline = grading
run.useweeks = grading run.useweeks = grading
elif o == "-T": elif o == "-T":
run.timeline = True run.timeline = True
elif o == "--timeline": elif o == "--timeline":
run.timeline = optval.get_boolean_argument(a) run.timeline = optval.get_boolean_argument(a)
elif o == "--until": elif o == "--until":
interval.set_until(a) interval.set_until(a)
elif o == "-w": elif o == "-w":
run.useweeks = True run.useweeks = True
elif o == "--weeks": elif o == "--weeks":
run.useweeks = optval.get_boolean_argument(a) run.useweeks = optval.get_boolean_argument(a)
elif o in("-x", "--exclude"): elif o in ("-x", "--exclude"):
if clear_x_on_next_pass: if clear_x_on_next_pass:
clear_x_on_next_pass = False clear_x_on_next_pass = False
filtering.clear() filtering.clear()
filtering.add(a) filtering.add(a)
__check_python_version__() __check_python_version__()
run.process(repos) run.process(repos)
except (filtering.InvalidRegExpError, format.InvalidFormatError, optval.InvalidOptionArgument, getopt.error) as exception: except (filtering.InvalidRegExpError, format.InvalidFormatError, optval.InvalidOptionArgument, getopt.error) as exception:
print(sys.argv[0], "\b:", exception.msg, file=sys.stderr) print(sys.argv[0], "\b:", exception.msg, file=sys.stderr)
print(_("Try `{0} --help' for more information.").format(sys.argv[0]), file=sys.stderr) print(_("Try `{0} --help' for more information.").format(sys.argv[0]), file=sys.stderr)
sys.exit(2) sys.exit(2)
@atexit.register @atexit.register
def cleanup(): def cleanup():
clone.delete() clone.delete()
if __name__ == "__main__": if __name__ == "__main__":
main() main()
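Taken together, the option table above and the Runner flags it sets describe the whole command line. A hedged usage sketch follows; the repository path, dates and output redirection are illustrative, and the exact executable name depends on how the package is installed:

    gitinspector -F html --hard=true --weeks=true --since=2021-01-01 --until=2021-03-01 -x node_modules path/to/repo > report.html

Because of the optval extension, a bare --hard (declared as "hard:true") is rewritten to --hard=true before getopt ever sees it.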

View File

@ -21,20 +21,21 @@
import hashlib import hashlib
try: try:
from urllib.parse import urlencode from urllib.parse import urlencode
except: except:
from urllib.parse import urlencode from urllib.parse import urlencode
from . import format from . import format
def get_url(email, size=20): def get_url(email, size=20):
md5hash = hashlib.md5(email.encode("utf-8").lower().strip()).hexdigest() md5hash = hashlib.md5(email.encode("utf-8").lower().strip()).hexdigest()
base_url = "https://www.gravatar.com/avatar/" + md5hash base_url = "https://www.gravatar.com/avatar/" + md5hash
params = None params = None
if format.get_selected() == "html": if format.get_selected() == "html":
params = {"default": "identicon", "size": size} params = {"default": "identicon", "size": size}
elif format.get_selected() == "xml" or format.get_selected() == "json": elif format.get_selected() == "xml" or format.get_selected() == "json":
params = {"default": "identicon"} params = {"default": "identicon"}
return base_url + "?" + urlencode(params) return base_url + "?" + urlencode(params)

View File

@ -18,13 +18,13 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import sys import sys
from .extensions import DEFAULT_EXTENSIONS from .extensions import DEFAULT_EXTENSIONS
from .format import __available_formats__ from .format import __available_formats__
__doc__ = _("""Usage: {0} [OPTION]... [REPOSITORY]... __doc__ = _(
"""Usage: {0} [OPTION]... [REPOSITORY]...
List information about the repository in REPOSITORY. If no repository is List information about the repository in REPOSITORY. If no repository is
specified, the current directory is used. If multiple repositories are specified, the current directory is used. If multiple repositories are
given, information will be merged into a unified statistical report. given, information will be merged into a unified statistical report.
@ -76,7 +76,9 @@ add or remove one of the specified extensions, see -f or --file-types for
more information. more information.
gitinspector requires that the git executable is available in your PATH. gitinspector requires that the git executable is available in your PATH.
Report gitinspector bugs to gitinspector@ejwa.se.""") Report gitinspector bugs to gitinspector@ejwa.se."""
)
def output(): def output():
print(__doc__.format(sys.argv[0], ",".join(DEFAULT_EXTENSIONS), ",".join(__available_formats__))) print(__doc__.format(sys.argv[0], ",".join(DEFAULT_EXTENSIONS), ",".join(__available_formats__)))

View File

@ -18,11 +18,10 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
try: try:
from shlex import quote from shlex import quote
except ImportError: except ImportError:
from pipes import quote from pipes import quote
__since__ = "" __since__ = ""
@ -30,26 +29,33 @@ __until__ = ""
__ref__ = "HEAD" __ref__ = "HEAD"
def has_interval(): def has_interval():
return __since__ + __until__ != "" return __since__ + __until__ != ""
def get_since(): def get_since():
return __since__ return __since__
def set_since(since): def set_since(since):
global __since__ global __since__
__since__ = "--since=" + quote(since) __since__ = "--since=" + quote(since)
def get_until(): def get_until():
return __until__ return __until__
def set_until(until): def set_until(until):
global __until__ global __until__
__until__ = "--until=" + quote(until) __until__ = "--until=" + quote(until)
def get_ref(): def get_ref():
return __ref__ return __ref__
def set_ref(ref): def set_ref(ref):
global __ref__ global __ref__
__ref__ = ref __ref__ = ref
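The setters above only store shell-quoted --since=/--until= fragments so the git-facing code can pick them up later through the getters. A small sketch of that round trip, with illustrative dates:

    from shlex import quote

    since = "--since=" + quote("2021-01-01")
    until = "--until=" + quote("2021-03-01")
    # e.g. spliced into a git invocation when non-empty
    print(["git", "log", since, until, "HEAD"])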

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import gettext import gettext
import locale import locale
import os import os
@ -31,76 +30,84 @@ __enabled__ = False
__installed__ = False __installed__ = False
__translation__ = None __translation__ = None
#Dummy function used to handle string constants
# Dummy function used to handle string constants
def N_(message): def N_(message):
return message return message
def init(): def init():
global __enabled__ global __enabled__
global __installed__ global __installed__
global __translation__ global __translation__
if not __installed__: if not __installed__:
try: try:
locale.setlocale(locale.LC_ALL, "") locale.setlocale(locale.LC_ALL, "")
except locale.Error: except locale.Error:
__translation__ = gettext.NullTranslations() __translation__ = gettext.NullTranslations()
else: else:
lang = locale.getlocale() lang = locale.getlocale()
#Fix for non-POSIX-compliant systems (Windows et al.). # Fix for non-POSIX-compliant systems (Windows et al.).
if os.getenv('LANG') is None: if os.getenv("LANG") is None:
lang = locale.getdefaultlocale() lang = locale.getdefaultlocale()
if lang[0]: if lang[0]:
os.environ['LANG'] = lang[0] os.environ["LANG"] = lang[0]
if lang[0] is not None: if lang[0] is not None:
filename = basedir.get_basedir() + "/translations/messages_%s.mo" % lang[0][0:2] filename = basedir.get_basedir() + "/translations/messages_%s.mo" % lang[0][0:2]
try: try:
__translation__ = gettext.GNUTranslations(open(filename, "rb")) __translation__ = gettext.GNUTranslations(open(filename, "rb"))
except IOError: except IOError:
__translation__ = gettext.NullTranslations() __translation__ = gettext.NullTranslations()
else: else:
print("WARNING: Localization disabled because the system language could not be determined.", file=sys.stderr) print("WARNING: Localization disabled because the system language could not be determined.", file=sys.stderr)
__translation__ = gettext.NullTranslations() __translation__ = gettext.NullTranslations()
__enabled__ = True __enabled__ = True
__installed__ = True __installed__ = True
__translation__.install() __translation__.install()
def check_compatibility(version): def check_compatibility(version):
if isinstance(__translation__, gettext.GNUTranslations): if isinstance(__translation__, gettext.GNUTranslations):
header_pattern = re.compile("^([^:\n]+): *(.*?) *$", re.MULTILINE) header_pattern = re.compile("^([^:\n]+): *(.*?) *$", re.MULTILINE)
header_entries = dict(header_pattern.findall(_(""))) header_entries = dict(header_pattern.findall(_("")))
if header_entries["Project-Id-Version"] != "gitinspector {0}".format(version):
print(
"WARNING: The translation for your system locale is not up to date with the current gitinspector "
"version. The current maintainer of this locale is {0}.".format(header_entries["Last-Translator"]),
file=sys.stderr,
)
if header_entries["Project-Id-Version"] != "gitinspector {0}".format(version):
print("WARNING: The translation for your system locale is not up to date with the current gitinspector "
"version. The current maintainer of this locale is {0}.".format(header_entries["Last-Translator"]),
file=sys.stderr)
def get_date(): def get_date():
if __enabled__ and isinstance(__translation__, gettext.GNUTranslations): if __enabled__ and isinstance(__translation__, gettext.GNUTranslations):
date = time.strftime("%x") date = time.strftime("%x")
if hasattr(date, 'decode'): if hasattr(date, "decode"):
date = date.decode("utf-8", "replace") date = date.decode("utf-8", "replace")
return date return date
else: else:
return time.strftime("%Y/%m/%d") return time.strftime("%Y/%m/%d")
def enable(): def enable():
if isinstance(__translation__, gettext.GNUTranslations): if isinstance(__translation__, gettext.GNUTranslations):
__translation__.install(True) __translation__.install(True)
global __enabled__ global __enabled__
__enabled__ = True __enabled__ = True
def disable(): def disable():
global __enabled__ global __enabled__
__enabled__ = False __enabled__ = False
if __installed__: if __installed__:
gettext.NullTranslations().install() gettext.NullTranslations().install()
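check_compatibility above relies on gettext returning the catalogue metadata when the empty string is translated, then parses it with the header regex. A small sketch of that parsing on a hand-written header (the header text is made up):

    import re

    header = "Project-Id-Version: gitinspector 0.x.y\nLast-Translator: Jane Doe\n"
    header_pattern = re.compile("^([^:\n]+): *(.*?) *$", re.MULTILINE)
    entries = dict(header_pattern.findall(header))
    print(entries["Project-Id-Version"], "/", entries["Last-Translator"])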

View File

@ -23,103 +23,137 @@ import subprocess
from .changes import FileDiff from .changes import FileDiff
from . import comment, filtering, interval from . import comment, filtering, interval
__metric_eloc__ = {"java": 500, "c": 500, "cpp": 500, "cs": 500, "h": 300, "hpp": 300, "php": 500, "py": 500, "glsl": 1000, __metric_eloc__ = {
"rb": 500, "js": 500, "sql": 1000, "xml": 1000} "java": 500,
"c": 500,
"cpp": 500,
"cs": 500,
"h": 300,
"hpp": 300,
"php": 500,
"py": 500,
"glsl": 1000,
"rb": 500,
"js": 500,
"sql": 1000,
"xml": 1000,
}
__metric_cc_tokens__ = [[["java", "js", "c", "cc", "cpp"], ["else", r"for\s+\(.*\)", r"if\s+\(.*\)", r"case\s+\w+:", __metric_cc_tokens__ = [
"default:", r"while\s+\(.*\)"], [
["assert", "break", "continue", "return"]], ["java", "js", "c", "cc", "cpp"],
[["cs"], ["else", r"for\s+\(.*\)", r"foreach\s+\(.*\)", r"goto\s+\w+:", r"if\s+\(.*\)", r"case\s+\w+:", ["else", r"for\s+\(.*\)", r"if\s+\(.*\)", r"case\s+\w+:", "default:", r"while\s+\(.*\)"],
"default:", r"while\s+\(.*\)"], ["assert", "break", "continue", "return"],
["assert", "break", "continue", "return"]], ],
[["py"], [r"^\s+elif .*:$", r"^\s+else:$", r"^\s+for .*:", r"^\s+if .*:$", r"^\s+while .*:$"], [
[r"^\s+assert", "break", "continue", "return"]]] ["cs"],
[
"else",
r"for\s+\(.*\)",
r"foreach\s+\(.*\)",
r"goto\s+\w+:",
r"if\s+\(.*\)",
r"case\s+\w+:",
"default:",
r"while\s+\(.*\)",
],
["assert", "break", "continue", "return"],
],
[
["py"],
[r"^\s+elif .*:$", r"^\s+else:$", r"^\s+for .*:", r"^\s+if .*:$", r"^\s+while .*:$"],
[r"^\s+assert", "break", "continue", "return"],
],
]
METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD = 50 METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD = 50
METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD = 0.75 METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD = 0.75
class MetricsLogic(object): class MetricsLogic(object):
def __init__(self): def __init__(self):
self.eloc = {} self.eloc = {}
self.cyclomatic_complexity = {} self.cyclomatic_complexity = {}
self.cyclomatic_complexity_density = {} self.cyclomatic_complexity_density = {}
ls_tree_p = subprocess.Popen(["git", "ls-tree", "--name-only", "-r", interval.get_ref()], ls_tree_p = subprocess.Popen(
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) ["git", "ls-tree", "--name-only", "-r", interval.get_ref()], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
lines = ls_tree_p.communicate()[0].splitlines() )
ls_tree_p.stdout.close() lines = ls_tree_p.communicate()[0].splitlines()
ls_tree_p.stdout.close()
if ls_tree_p.returncode == 0: if ls_tree_p.returncode == 0:
for i in lines: for i in lines:
i = i.strip().decode("unicode_escape", "ignore") i = i.strip().decode("unicode_escape", "ignore")
i = i.encode("latin-1", "replace") i = i.encode("latin-1", "replace")
i = i.decode("utf-8", "replace").strip("\"").strip("'").strip() i = i.decode("utf-8", "replace").strip('"').strip("'").strip()
if FileDiff.is_valid_extension(i) and not filtering.set_filtered(FileDiff.get_filename(i)): if FileDiff.is_valid_extension(i) and not filtering.set_filtered(FileDiff.get_filename(i)):
file_r = subprocess.Popen(["git", "show", interval.get_ref() + ":{0}".format(i.strip())], file_r = subprocess.Popen(
stdout=subprocess.PIPE).stdout.readlines() ["git", "show", interval.get_ref() + ":{0}".format(i.strip())], stdout=subprocess.PIPE
).stdout.readlines()
extension = FileDiff.get_extension(i) extension = FileDiff.get_extension(i)
lines = MetricsLogic.get_eloc(file_r, extension) lines = MetricsLogic.get_eloc(file_r, extension)
cycc = MetricsLogic.get_cyclomatic_complexity(file_r, extension) cycc = MetricsLogic.get_cyclomatic_complexity(file_r, extension)
if __metric_eloc__.get(extension, None) != None and __metric_eloc__[extension] < lines: if __metric_eloc__.get(extension, None) is not None and __metric_eloc__[extension] < lines:
self.eloc[i.strip()] = lines self.eloc[i.strip()] = lines
if METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD < cycc: if METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD < cycc:
self.cyclomatic_complexity[i.strip()] = cycc self.cyclomatic_complexity[i.strip()] = cycc
if lines > 0 and METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD < cycc / float(lines): if lines > 0 and METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD < cycc / float(lines):
self.cyclomatic_complexity_density[i.strip()] = cycc / float(lines) self.cyclomatic_complexity_density[i.strip()] = cycc / float(lines)
def __iadd__(self, other): def __iadd__(self, other):
try: try:
self.eloc.update(other.eloc) self.eloc.update(other.eloc)
self.cyclomatic_complexity.update(other.cyclomatic_complexity) self.cyclomatic_complexity.update(other.cyclomatic_complexity)
self.cyclomatic_complexity_density.update(other.cyclomatic_complexity_density) self.cyclomatic_complexity_density.update(other.cyclomatic_complexity_density)
return self return self
except AttributeError: except AttributeError:
return other; return other
@staticmethod @staticmethod
def get_cyclomatic_complexity(file_r, extension): def get_cyclomatic_complexity(file_r, extension):
is_inside_comment = False is_inside_comment = False
cc_counter = 0 cc_counter = 0
entry_tokens = None entry_tokens = None
exit_tokens = None exit_tokens = None
for i in __metric_cc_tokens__: for i in __metric_cc_tokens__:
if extension in i[0]: if extension in i[0]:
entry_tokens = i[1] entry_tokens = i[1]
exit_tokens = i[2] exit_tokens = i[2]
if entry_tokens or exit_tokens: if entry_tokens or exit_tokens:
for i in file_r: for i in file_r:
i = i.decode("utf-8", "replace") i = i.decode("utf-8", "replace")
(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i) (_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)
if not is_inside_comment and not comment.is_comment(extension, i): if not is_inside_comment and not comment.is_comment(extension, i):
for j in entry_tokens: for j in entry_tokens:
if re.search(j, i, re.DOTALL): if re.search(j, i, re.DOTALL):
cc_counter += 2 cc_counter += 2
for j in exit_tokens: for j in exit_tokens:
if re.search(j, i, re.DOTALL): if re.search(j, i, re.DOTALL):
cc_counter += 1 cc_counter += 1
return cc_counter return cc_counter
return -1 return -1
@staticmethod @staticmethod
def get_eloc(file_r, extension): def get_eloc(file_r, extension):
is_inside_comment = False is_inside_comment = False
eloc_counter = 0 eloc_counter = 0
for i in file_r: for i in file_r:
i = i.decode("utf-8", "replace") i = i.decode("utf-8", "replace")
(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i) (_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i)
if not is_inside_comment and not comment.is_comment(extension, i): if not is_inside_comment and not comment.is_comment(extension, i):
eloc_counter += 1 eloc_counter += 1
return eloc_counter return eloc_counter
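get_cyclomatic_complexity above adds 2 for every branching token and 1 for every exit token found on non-comment lines. A worked sketch using the same "py" token lists on a made-up four-line body:

    import re

    entry_tokens = [r"^\s+elif .*:$", r"^\s+else:$", r"^\s+for .*:", r"^\s+if .*:$", r"^\s+while .*:$"]
    exit_tokens = [r"^\s+assert", "break", "continue", "return"]
    body = [
        "    if x > 0:",      # branching token, +2
        "        return x",   # exit token, +1
        "    else:",          # branching token, +2
        "        return -x",  # exit token, +1
    ]
    cc = 0
    for line in body:
        cc += sum(2 for token in entry_tokens if re.search(token, line, re.DOTALL))
        cc += sum(1 for token in exit_tokens if re.search(token, line, re.DOTALL))
    print(cc)  # 6 for this snippet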

View File

@ -20,47 +20,53 @@
import getopt import getopt
class InvalidOptionArgument(Exception): class InvalidOptionArgument(Exception):
def __init__(self, msg): def __init__(self, msg):
super(InvalidOptionArgument, self).__init__(msg) super(InvalidOptionArgument, self).__init__(msg)
self.msg = msg self.msg = msg
def __find_arg_in_options__(arg, options): def __find_arg_in_options__(arg, options):
for opt in options: for opt in options:
if opt[0].find(arg) == 0: if opt[0].find(arg) == 0:
return opt return opt
return None return None
def __find_options_to_extend__(long_options): def __find_options_to_extend__(long_options):
options_to_extend = [] options_to_extend = []
for num, arg in enumerate(long_options): for num, arg in enumerate(long_options):
arg = arg.split(":") arg = arg.split(":")
if len(arg) == 2: if len(arg) == 2:
long_options[num] = arg[0] + "=" long_options[num] = arg[0] + "="
options_to_extend.append(("--" + arg[0], arg[1])) options_to_extend.append(("--" + arg[0], arg[1]))
return options_to_extend return options_to_extend
# This is a duplicate of gnu_getopt, but with support for optional arguments in long options, in the form; "arg:default_value". # This is a duplicate of gnu_getopt, but with support for optional arguments in long options, in the form; "arg:default_value".
def gnu_getopt(args, options, long_options): def gnu_getopt(args, options, long_options):
options_to_extend = __find_options_to_extend__(long_options) options_to_extend = __find_options_to_extend__(long_options)
for num, arg in enumerate(args): for num, arg in enumerate(args):
opt = __find_arg_in_options__(arg, options_to_extend) opt = __find_arg_in_options__(arg, options_to_extend)
if opt: if opt:
args[num] = arg + "=" + opt[1] args[num] = arg + "=" + opt[1]
return getopt.gnu_getopt(args, options, long_options) return getopt.gnu_getopt(args, options, long_options)
def get_boolean_argument(arg): def get_boolean_argument(arg):
if isinstance(arg, bool): if isinstance(arg, bool):
return arg return arg
elif arg == None or arg.lower() == "false" or arg.lower() == "f" or arg == "0": elif arg is None or arg.lower() == "false" or arg.lower() == "f" or arg == "0":
return False return False
elif arg.lower() == "true" or arg.lower() == "t" or arg == "1": elif arg.lower() == "true" or arg.lower() == "t" or arg == "1":
return True return True
raise InvalidOptionArgument(_("The given option argument is not a valid boolean.")) raise InvalidOptionArgument(_("The given option argument is not a valid boolean."))
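The helpers above let a long option declared as "name:default" be given with or without a value. A short sketch of that rewrite and of the boolean parsing; the import path is an assumption about the package layout, and the option name is taken from the gitinspector option table:

    from gitinspector import optval  # assumed package layout

    # a bare --hard is rewritten to --hard=true before getopt sees it
    opts, args = optval.gnu_getopt(["--hard", "some-repo"], "", ["hard:true"])
    print(opts, args)                           # [('--hard', 'true')] ['some-repo']
    print(optval.get_boolean_argument("true"))  # True
    print(optval.get_boolean_argument("0"))     # False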

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import json import json
import sys import sys
import textwrap import textwrap
@ -27,128 +26,160 @@ from .. import format, gravatar, terminal
from ..blame import Blame from ..blame import Blame
from .outputable import Outputable from .outputable import Outputable
BLAME_INFO_TEXT = N_("Below are the number of rows from each author that have survived and are still " BLAME_INFO_TEXT = N_(
"intact in the current revision") "Below are the number of rows from each author that have survived and are still " "intact in the current revision"
)
class BlameOutput(Outputable): class BlameOutput(Outputable):
def __init__(self, changes, blame): def __init__(self, changes, blame):
if format.is_interactive_format(): if format.is_interactive_format():
print("") print("")
self.changes = changes self.changes = changes
self.blame = blame self.blame = blame
Outputable.__init__(self) Outputable.__init__(self)
def output_html(self): def output_html(self):
blame_xml = "<div><div class=\"box\">" blame_xml = '<div><div class="box">'
blame_xml += "<p>" + _(BLAME_INFO_TEXT) + ".</p><div><table id=\"blame\" class=\"git\">" blame_xml += "<p>" + _(BLAME_INFO_TEXT) + '.</p><div><table id="blame" class="git">'
blame_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th> </tr></thead>".format( blame_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th> </tr></thead>".format(
_("Author"), _("Rows"), _("Stability"), _("Age"), _("% in comments")) _("Author"), _("Rows"), _("Stability"), _("Age"), _("% in comments")
blame_xml += "<tbody>" )
chart_data = "" blame_xml += "<tbody>"
blames = sorted(self.blame.get_summed_blames().items()) chart_data = ""
total_blames = 0 blames = sorted(self.blame.get_summed_blames().items())
total_blames = 0
for i in blames: for i in blames:
total_blames += i[1].rows total_blames += i[1].rows
for i, entry in enumerate(blames): for i, entry in enumerate(blames):
work_percentage = str("{0:.2f}".format(100.0 * entry[1].rows / total_blames)) work_percentage = str("{0:.2f}".format(100.0 * entry[1].rows / total_blames))
blame_xml += "<tr " + ("class=\"odd\">" if i % 2 == 1 else ">") blame_xml += "<tr " + ('class="odd">' if i % 2 == 1 else ">")
if format.get_selected() == "html": if format.get_selected() == "html":
author_email = self.changes.get_latest_email_by_author(entry[0]) author_email = self.changes.get_latest_email_by_author(entry[0])
blame_xml += "<td><img src=\"{0}\"/>{1}</td>".format(gravatar.get_url(author_email), entry[0]) blame_xml += '<td><img src="{0}"/>{1}</td>'.format(gravatar.get_url(author_email), entry[0])
else: else:
blame_xml += "<td>" + entry[0] + "</td>" blame_xml += "<td>" + entry[0] + "</td>"
blame_xml += "<td>" + str(entry[1].rows) + "</td>" blame_xml += "<td>" + str(entry[1].rows) + "</td>"
blame_xml += "<td>" + ("{0:.1f}".format(Blame.get_stability(entry[0], entry[1].rows, self.changes)) + "</td>") blame_xml += "<td>" + ("{0:.1f}".format(Blame.get_stability(entry[0], entry[1].rows, self.changes)) + "</td>")
blame_xml += "<td>" + "{0:.1f}".format(float(entry[1].skew) / entry[1].rows) + "</td>" blame_xml += "<td>" + "{0:.1f}".format(float(entry[1].skew) / entry[1].rows) + "</td>"
blame_xml += "<td>" + "{0:.2f}".format(100.0 * entry[1].comments / entry[1].rows) + "</td>" blame_xml += "<td>" + "{0:.2f}".format(100.0 * entry[1].comments / entry[1].rows) + "</td>"
blame_xml += "<td style=\"display: none\">" + work_percentage + "</td>" blame_xml += '<td style="display: none">' + work_percentage + "</td>"
blame_xml += "</tr>" blame_xml += "</tr>"
chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry[0]), work_percentage) chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry[0]), work_percentage)
if blames[-1] != entry: if blames[-1] != entry:
chart_data += ", " chart_data += ", "
blame_xml += "<tfoot><tr> <td colspan=\"5\">&nbsp;</td> </tr></tfoot></tbody></table>" blame_xml += '<tfoot><tr> <td colspan="5">&nbsp;</td> </tr></tfoot></tbody></table>'
blame_xml += "<div class=\"chart\" id=\"blame_chart\"></div></div>" blame_xml += '<div class="chart" id="blame_chart"></div></div>'
blame_xml += "<script type=\"text/javascript\">" blame_xml += '<script type="text/javascript">'
blame_xml += " blame_plot = $.plot($(\"#blame_chart\"), [{0}], {{".format(chart_data) blame_xml += ' blame_plot = $.plot($("#blame_chart"), [{0}], {{'.format(chart_data)
blame_xml += " series: {" blame_xml += " series: {"
blame_xml += " pie: {" blame_xml += " pie: {"
blame_xml += " innerRadius: 0.4," blame_xml += " innerRadius: 0.4,"
blame_xml += " show: true," blame_xml += " show: true,"
blame_xml += " combine: {" blame_xml += " combine: {"
blame_xml += " threshold: 0.01," blame_xml += " threshold: 0.01,"
blame_xml += " label: \"" + _("Minor Authors") + "\"" blame_xml += ' label: "' + _("Minor Authors") + '"'
blame_xml += " }" blame_xml += " }"
blame_xml += " }" blame_xml += " }"
blame_xml += " }, grid: {" blame_xml += " }, grid: {"
blame_xml += " hoverable: true" blame_xml += " hoverable: true"
blame_xml += " }" blame_xml += " }"
blame_xml += " });" blame_xml += " });"
blame_xml += "</script></div></div>" blame_xml += "</script></div></div>"
print(blame_xml) print(blame_xml)
def output_json(self): def output_json(self):
message_json = "\t\t\t\"message\": \"" + _(BLAME_INFO_TEXT) + "\",\n" message_json = '\t\t\t"message": "' + _(BLAME_INFO_TEXT) + '",\n'
blame_json = "" blame_json = ""
for i in sorted(self.blame.get_summed_blames().items()): for i in sorted(self.blame.get_summed_blames().items()):
author_email = self.changes.get_latest_email_by_author(i[0]) author_email = self.changes.get_latest_email_by_author(i[0])
name_json = "\t\t\t\t\"name\": \"" + i[0] + "\",\n" name_json = '\t\t\t\t"name": "' + i[0] + '",\n'
email_json = "\t\t\t\t\"email\": \"" + author_email + "\",\n" email_json = '\t\t\t\t"email": "' + author_email + '",\n'
gravatar_json = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n" gravatar_json = '\t\t\t\t"gravatar": "' + gravatar.get_url(author_email) + '",\n'
rows_json = "\t\t\t\t\"rows\": " + str(i[1].rows) + ",\n" rows_json = '\t\t\t\t"rows": ' + str(i[1].rows) + ",\n"
stability_json = ("\t\t\t\t\"stability\": " + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, stability_json = (
self.changes)) + ",\n") '\t\t\t\t"stability": ' + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)) + ",\n"
age_json = ("\t\t\t\t\"age\": " + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + ",\n") )
percentage_in_comments_json = ("\t\t\t\t\"percentage_in_comments\": " + age_json = '\t\t\t\t"age": ' + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + ",\n"
"{0:.2f}".format(100.0 * i[1].comments / i[1].rows) + "\n") percentage_in_comments_json = (
blame_json += ("{\n" + name_json + email_json + gravatar_json + rows_json + stability_json + age_json + '\t\t\t\t"percentage_in_comments": ' + "{0:.2f}".format(100.0 * i[1].comments / i[1].rows) + "\n"
percentage_in_comments_json + "\t\t\t},") )
else: blame_json += (
blame_json = blame_json[:-1] "{\n"
+ name_json
+ email_json
+ gravatar_json
+ rows_json
+ stability_json
+ age_json
+ percentage_in_comments_json
+ "\t\t\t},"
)
else:
blame_json = blame_json[:-1]
print(",\n\t\t\"blame\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + blame_json + "]\n\t\t}", end="") print(',\n\t\t"blame": {\n' + message_json + '\t\t\t"authors": [\n\t\t\t' + blame_json + "]\n\t\t}", end="")
def output_text(self): def output_text(self):
if sys.stdout.isatty() and format.is_interactive_format(): if sys.stdout.isatty() and format.is_interactive_format():
terminal.clear_row() terminal.clear_row()
print(textwrap.fill(_(BLAME_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n") print(textwrap.fill(_(BLAME_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n")
terminal.printb(terminal.ljust(_("Author"), 21) + terminal.rjust(_("Rows"), 10) + terminal.rjust(_("Stability"), 15) + terminal.printb(
terminal.rjust(_("Age"), 13) + terminal.rjust(_("% in comments"), 20)) terminal.ljust(_("Author"), 21)
+ terminal.rjust(_("Rows"), 10)
+ terminal.rjust(_("Stability"), 15)
+ terminal.rjust(_("Age"), 13)
+ terminal.rjust(_("% in comments"), 20)
)
for i in sorted(self.blame.get_summed_blames().items()): for i in sorted(self.blame.get_summed_blames().items()):
print(terminal.ljust(i[0], 20)[0:20 - terminal.get_excess_column_count(i[0])], end=" ") print(terminal.ljust(i[0], 20)[0:20 - terminal.get_excess_column_count(i[0])], end=" ")
print(str(i[1].rows).rjust(10), end=" ") print(str(i[1].rows).rjust(10), end=" ")
print("{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)).rjust(14), end=" ") print("{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)).rjust(14), end=" ")
print("{0:.1f}".format(float(i[1].skew) / i[1].rows).rjust(12), end=" ") print("{0:.1f}".format(float(i[1].skew) / i[1].rows).rjust(12), end=" ")
print("{0:.2f}".format(100.0 * i[1].comments / i[1].rows).rjust(19)) print("{0:.2f}".format(100.0 * i[1].comments / i[1].rows).rjust(19))
def output_xml(self): def output_xml(self):
message_xml = "\t\t<message>" + _(BLAME_INFO_TEXT) + "</message>\n" message_xml = "\t\t<message>" + _(BLAME_INFO_TEXT) + "</message>\n"
blame_xml = "" blame_xml = ""
for i in sorted(self.blame.get_summed_blames().items()): for i in sorted(self.blame.get_summed_blames().items()):
author_email = self.changes.get_latest_email_by_author(i[0]) author_email = self.changes.get_latest_email_by_author(i[0])
name_xml = "\t\t\t\t<name>" + i[0] + "</name>\n" name_xml = "\t\t\t\t<name>" + i[0] + "</name>\n"
email_xml = "\t\t\t\t<email>" + author_email + "</email>\n" email_xml = "\t\t\t\t<email>" + author_email + "</email>\n"
gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n" gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
rows_xml = "\t\t\t\t<rows>" + str(i[1].rows) + "</rows>\n" rows_xml = "\t\t\t\t<rows>" + str(i[1].rows) + "</rows>\n"
stability_xml = ("\t\t\t\t<stability>" + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, stability_xml = (
self.changes)) + "</stability>\n") "\t\t\t\t<stability>" + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)) + "</stability>\n"
age_xml = ("\t\t\t\t<age>" + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + "</age>\n") )
percentage_in_comments_xml = ("\t\t\t\t<percentage-in-comments>" + "{0:.2f}".format(100.0 * i[1].comments / i[1].rows) + age_xml = "\t\t\t\t<age>" + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + "</age>\n"
"</percentage-in-comments>\n") percentage_in_comments_xml = (
blame_xml += ("\t\t\t<author>\n" + name_xml + email_xml + gravatar_xml + rows_xml + stability_xml + "\t\t\t\t<percentage-in-comments>"
age_xml + percentage_in_comments_xml + "\t\t\t</author>\n") + "{0:.2f}".format(100.0 * i[1].comments / i[1].rows)
+ "</percentage-in-comments>\n"
)
blame_xml += (
"\t\t\t<author>\n"
+ name_xml
+ email_xml
+ gravatar_xml
+ rows_xml
+ stability_xml
+ age_xml
+ percentage_in_comments_xml
+ "\t\t\t</author>\n"
)
print("\t<blame>\n" + message_xml + "\t\t<authors>\n" + blame_xml + "\t\t</authors>\n\t</blame>") print("\t<blame>\n" + message_xml + "\t\t<authors>\n" + blame_xml + "\t\t</authors>\n\t</blame>")

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import json import json
import textwrap import textwrap
from ..localization import N_ from ..localization import N_
@ -28,162 +27,189 @@ from .outputable import Outputable
HISTORICAL_INFO_TEXT = N_("The following historical commit information, by author, was found") HISTORICAL_INFO_TEXT = N_("The following historical commit information, by author, was found")
NO_COMMITED_FILES_TEXT = N_("No commited files with the specified extensions were found") NO_COMMITED_FILES_TEXT = N_("No commited files with the specified extensions were found")
class ChangesOutput(Outputable): class ChangesOutput(Outputable):
def __init__(self, changes): def __init__(self, changes):
self.changes = changes self.changes = changes
Outputable.__init__(self) Outputable.__init__(self)
def output_html(self): def output_html(self):
authorinfo_list = self.changes.get_authorinfo_list() authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0 total_changes = 0.0
changes_xml = "<div><div class=\"box\">" changes_xml = '<div><div class="box">'
chart_data = "" chart_data = ""
for i in authorinfo_list: for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions total_changes += authorinfo_list.get(i).deletions
if authorinfo_list: if authorinfo_list:
changes_xml += "<p>" + _(HISTORICAL_INFO_TEXT) + ".</p><div><table id=\"changes\" class=\"git\">" changes_xml += "<p>" + _(HISTORICAL_INFO_TEXT) + '.</p><div><table id="changes" class="git">'
changes_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th>".format( changes_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th>".format(
_("Author"), _("Commits"), _("Insertions"), _("Deletions"), _("% of changes")) _("Author"), _("Commits"), _("Insertions"), _("Deletions"), _("% of changes")
changes_xml += "</tr></thead><tbody>" )
changes_xml += "</tr></thead><tbody>"
for i, entry in enumerate(sorted(authorinfo_list)): for i, entry in enumerate(sorted(authorinfo_list)):
authorinfo = authorinfo_list.get(entry) authorinfo = authorinfo_list.get(entry)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100 percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
changes_xml += "<tr " + ("class=\"odd\">" if i % 2 == 1 else ">") changes_xml += "<tr " + ('class="odd">' if i % 2 == 1 else ">")
if format.get_selected() == "html": if format.get_selected() == "html":
changes_xml += "<td><img src=\"{0}\"/>{1}</td>".format( changes_xml += '<td><img src="{0}"/>{1}</td>'.format(
gravatar.get_url(self.changes.get_latest_email_by_author(entry)), entry) gravatar.get_url(self.changes.get_latest_email_by_author(entry)), entry
else: )
changes_xml += "<td>" + entry + "</td>" else:
changes_xml += "<td>" + entry + "</td>"
changes_xml += "<td>" + str(authorinfo.commits) + "</td>" changes_xml += "<td>" + str(authorinfo.commits) + "</td>"
changes_xml += "<td>" + str(authorinfo.insertions) + "</td>" changes_xml += "<td>" + str(authorinfo.insertions) + "</td>"
changes_xml += "<td>" + str(authorinfo.deletions) + "</td>" changes_xml += "<td>" + str(authorinfo.deletions) + "</td>"
changes_xml += "<td>" + "{0:.2f}".format(percentage) + "</td>" changes_xml += "<td>" + "{0:.2f}".format(percentage) + "</td>"
changes_xml += "</tr>" changes_xml += "</tr>"
chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry), "{0:.2f}".format(percentage)) chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry), "{0:.2f}".format(percentage))
if sorted(authorinfo_list)[-1] != entry: if sorted(authorinfo_list)[-1] != entry:
chart_data += ", " chart_data += ", "
changes_xml += ("<tfoot><tr> <td colspan=\"5\">&nbsp;</td> </tr></tfoot></tbody></table>") changes_xml += '<tfoot><tr> <td colspan="5">&nbsp;</td> </tr></tfoot></tbody></table>'
changes_xml += "<div class=\"chart\" id=\"changes_chart\"></div></div>" changes_xml += '<div class="chart" id="changes_chart"></div></div>'
changes_xml += "<script type=\"text/javascript\">" changes_xml += '<script type="text/javascript">'
changes_xml += " changes_plot = $.plot($(\"#changes_chart\"), [{0}], {{".format(chart_data) changes_xml += ' changes_plot = $.plot($("#changes_chart"), [{0}], {{'.format(chart_data)
changes_xml += " series: {" changes_xml += " series: {"
changes_xml += " pie: {" changes_xml += " pie: {"
changes_xml += " innerRadius: 0.4," changes_xml += " innerRadius: 0.4,"
changes_xml += " show: true," changes_xml += " show: true,"
changes_xml += " combine: {" changes_xml += " combine: {"
changes_xml += " threshold: 0.01," changes_xml += " threshold: 0.01,"
changes_xml += " label: \"" + _("Minor Authors") + "\"" changes_xml += ' label: "' + _("Minor Authors") + '"'
changes_xml += " }" changes_xml += " }"
changes_xml += " }" changes_xml += " }"
changes_xml += " }, grid: {" changes_xml += " }, grid: {"
changes_xml += " hoverable: true" changes_xml += " hoverable: true"
changes_xml += " }" changes_xml += " }"
changes_xml += " });" changes_xml += " });"
changes_xml += "</script>" changes_xml += "</script>"
else: else:
changes_xml += "<p>" + _(NO_COMMITED_FILES_TEXT) + ".</p>" changes_xml += "<p>" + _(NO_COMMITED_FILES_TEXT) + ".</p>"
changes_xml += "</div></div>" changes_xml += "</div></div>"
print(changes_xml) print(changes_xml)
def output_json(self): def output_json(self):
authorinfo_list = self.changes.get_authorinfo_list() authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0 total_changes = 0.0
for i in authorinfo_list: for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions total_changes += authorinfo_list.get(i).deletions
if authorinfo_list: if authorinfo_list:
message_json = "\t\t\t\"message\": \"" + _(HISTORICAL_INFO_TEXT) + "\",\n" message_json = '\t\t\t"message": "' + _(HISTORICAL_INFO_TEXT) + '",\n'
changes_json = "" changes_json = ""
for i in sorted(authorinfo_list): for i in sorted(authorinfo_list):
author_email = self.changes.get_latest_email_by_author(i) author_email = self.changes.get_latest_email_by_author(i)
authorinfo = authorinfo_list.get(i) authorinfo = authorinfo_list.get(i)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100 percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
name_json = "\t\t\t\t\"name\": \"" + i + "\",\n" name_json = '\t\t\t\t"name": "' + i + '",\n'
email_json = "\t\t\t\t\"email\": \"" + author_email + "\",\n" email_json = '\t\t\t\t"email": "' + author_email + '",\n'
gravatar_json = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n" gravatar_json = '\t\t\t\t"gravatar": "' + gravatar.get_url(author_email) + '",\n'
commits_json = "\t\t\t\t\"commits\": " + str(authorinfo.commits) + ",\n" commits_json = '\t\t\t\t"commits": ' + str(authorinfo.commits) + ",\n"
insertions_json = "\t\t\t\t\"insertions\": " + str(authorinfo.insertions) + ",\n" insertions_json = '\t\t\t\t"insertions": ' + str(authorinfo.insertions) + ",\n"
deletions_json = "\t\t\t\t\"deletions\": " + str(authorinfo.deletions) + ",\n" deletions_json = '\t\t\t\t"deletions": ' + str(authorinfo.deletions) + ",\n"
percentage_json = "\t\t\t\t\"percentage_of_changes\": " + "{0:.2f}".format(percentage) + "\n" percentage_json = '\t\t\t\t"percentage_of_changes": ' + "{0:.2f}".format(percentage) + "\n"
changes_json += ("{\n" + name_json + email_json + gravatar_json + commits_json + changes_json += (
insertions_json + deletions_json + percentage_json + "\t\t\t}") "{\n"
changes_json += "," + name_json
else: + email_json
changes_json = changes_json[:-1] + gravatar_json
+ commits_json
+ insertions_json
+ deletions_json
+ percentage_json
+ "\t\t\t}"
)
changes_json += ","
else:
changes_json = changes_json[:-1]
print("\t\t\"changes\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + changes_json + "]\n\t\t}", end="") print('\t\t"changes": {\n' + message_json + '\t\t\t"authors": [\n\t\t\t' + changes_json + "]\n\t\t}", end="")
else: else:
print("\t\t\"exception\": \"" + _(NO_COMMITED_FILES_TEXT) + "\"") print('\t\t"exception": "' + _(NO_COMMITED_FILES_TEXT) + '"')
def output_text(self): def output_text(self):
authorinfo_list = self.changes.get_authorinfo_list() authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0 total_changes = 0.0
for i in authorinfo_list: for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions total_changes += authorinfo_list.get(i).deletions
if authorinfo_list: if authorinfo_list:
print(textwrap.fill(_(HISTORICAL_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n") print(textwrap.fill(_(HISTORICAL_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n")
terminal.printb(terminal.ljust(_("Author"), 21) + terminal.rjust(_("Commits"), 13) + terminal.printb(
terminal.rjust(_("Insertions"), 14) + terminal.rjust(_("Deletions"), 15) + terminal.ljust(_("Author"), 21)
terminal.rjust(_("% of changes"), 16)) + terminal.rjust(_("Commits"), 13)
+ terminal.rjust(_("Insertions"), 14)
+ terminal.rjust(_("Deletions"), 15)
+ terminal.rjust(_("% of changes"), 16)
)
for i in sorted(authorinfo_list): for i in sorted(authorinfo_list):
authorinfo = authorinfo_list.get(i) authorinfo = authorinfo_list.get(i)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100 percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
print(terminal.ljust(i, 20)[0:20 - terminal.get_excess_column_count(i)], end=" ") print(terminal.ljust(i, 20)[0:20 - terminal.get_excess_column_count(i)], end=" ")
print(str(authorinfo.commits).rjust(13), end=" ") print(str(authorinfo.commits).rjust(13), end=" ")
print(str(authorinfo.insertions).rjust(13), end=" ") print(str(authorinfo.insertions).rjust(13), end=" ")
print(str(authorinfo.deletions).rjust(14), end=" ") print(str(authorinfo.deletions).rjust(14), end=" ")
print("{0:.2f}".format(percentage).rjust(15)) print("{0:.2f}".format(percentage).rjust(15))
else: else:
print(_(NO_COMMITED_FILES_TEXT) + ".") print(_(NO_COMMITED_FILES_TEXT) + ".")
def output_xml(self): def output_xml(self):
authorinfo_list = self.changes.get_authorinfo_list() authorinfo_list = self.changes.get_authorinfo_list()
total_changes = 0.0 total_changes = 0.0
for i in authorinfo_list: for i in authorinfo_list:
total_changes += authorinfo_list.get(i).insertions total_changes += authorinfo_list.get(i).insertions
total_changes += authorinfo_list.get(i).deletions total_changes += authorinfo_list.get(i).deletions
if authorinfo_list: if authorinfo_list:
message_xml = "\t\t<message>" + _(HISTORICAL_INFO_TEXT) + "</message>\n" message_xml = "\t\t<message>" + _(HISTORICAL_INFO_TEXT) + "</message>\n"
changes_xml = "" changes_xml = ""
for i in sorted(authorinfo_list): for i in sorted(authorinfo_list):
author_email = self.changes.get_latest_email_by_author(i) author_email = self.changes.get_latest_email_by_author(i)
authorinfo = authorinfo_list.get(i) authorinfo = authorinfo_list.get(i)
percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100 percentage = 0 if total_changes == 0 else (authorinfo.insertions + authorinfo.deletions) / total_changes * 100
name_xml = "\t\t\t\t<name>" + i + "</name>\n" name_xml = "\t\t\t\t<name>" + i + "</name>\n"
email_xml = "\t\t\t\t<email>" + author_email + "</email>\n" email_xml = "\t\t\t\t<email>" + author_email + "</email>\n"
gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n" gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
commits_xml = "\t\t\t\t<commits>" + str(authorinfo.commits) + "</commits>\n" commits_xml = "\t\t\t\t<commits>" + str(authorinfo.commits) + "</commits>\n"
insertions_xml = "\t\t\t\t<insertions>" + str(authorinfo.insertions) + "</insertions>\n" insertions_xml = "\t\t\t\t<insertions>" + str(authorinfo.insertions) + "</insertions>\n"
deletions_xml = "\t\t\t\t<deletions>" + str(authorinfo.deletions) + "</deletions>\n" deletions_xml = "\t\t\t\t<deletions>" + str(authorinfo.deletions) + "</deletions>\n"
percentage_xml = "\t\t\t\t<percentage-of-changes>" + "{0:.2f}".format(percentage) + "</percentage-of-changes>\n" percentage_xml = (
"\t\t\t\t<percentage-of-changes>" + "{0:.2f}".format(percentage) + "</percentage-of-changes>\n"
)
changes_xml += ("\t\t\t<author>\n" + name_xml + email_xml + gravatar_xml + commits_xml + changes_xml += (
insertions_xml + deletions_xml + percentage_xml + "\t\t\t</author>\n") "\t\t\t<author>\n"
+ name_xml
+ email_xml
+ gravatar_xml
+ commits_xml
+ insertions_xml
+ deletions_xml
+ percentage_xml
+ "\t\t\t</author>\n"
)
print("\t<changes>\n" + message_xml + "\t\t<authors>\n" + changes_xml + "\t\t</authors>\n\t</changes>") print("\t<changes>\n" + message_xml + "\t\t<authors>\n" + changes_xml + "\t\t</authors>\n\t</changes>")
else: else:
print("\t<changes>\n\t\t<exception>" + _(NO_COMMITED_FILES_TEXT) + "</exception>\n\t</changes>") print("\t<changes>\n\t\t<exception>" + _(NO_COMMITED_FILES_TEXT) + "</exception>\n\t</changes>")

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import textwrap import textwrap
from ..localization import N_ from ..localization import N_
from .. import extensions, terminal from .. import extensions, terminal
@ -28,70 +27,93 @@ from .outputable import Outputable
EXTENSIONS_INFO_TEXT = N_("The extensions below were found in the repository history") EXTENSIONS_INFO_TEXT = N_("The extensions below were found in the repository history")
EXTENSIONS_MARKED_TEXT = N_("(extensions used during statistical analysis are marked)") EXTENSIONS_MARKED_TEXT = N_("(extensions used during statistical analysis are marked)")
class ExtensionsOutput(Outputable): class ExtensionsOutput(Outputable):
@staticmethod @staticmethod
def is_marked(extension): def is_marked(extension):
if extension in extensions.__extensions__ or "**" in extensions.__extensions__: if extension in extensions.__extensions__ or "**" in extensions.__extensions__:
return True return True
return False return False
def output_html(self): def output_html(self):
if extensions.__located_extensions__: if extensions.__located_extensions__:
extensions_xml = "<div><div class=\"box\">" extensions_xml = '<div><div class="box">'
extensions_xml += "<p>{0} {1}.</p><p>".format(_(EXTENSIONS_INFO_TEXT), _(EXTENSIONS_MARKED_TEXT)) extensions_xml += "<p>{0} {1}.</p><p>".format(_(EXTENSIONS_INFO_TEXT), _(EXTENSIONS_MARKED_TEXT))
for i in sorted(extensions.__located_extensions__): for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i): if ExtensionsOutput.is_marked(i):
extensions_xml += "<strong>" + i + "</strong>" extensions_xml += "<strong>" + i + "</strong>"
else: else:
extensions_xml += i extensions_xml += i
extensions_xml += " " extensions_xml += " "
extensions_xml += "</p></div></div>" extensions_xml += "</p></div></div>"
print(extensions_xml) print(extensions_xml)
def output_json(self): def output_json(self):
if extensions.__located_extensions__: if extensions.__located_extensions__:
message_json = "\t\t\t\"message\": \"" + _(EXTENSIONS_INFO_TEXT) + "\",\n" message_json = '\t\t\t"message": "' + _(EXTENSIONS_INFO_TEXT) + '",\n'
used_extensions_json = "" used_extensions_json = ""
unused_extensions_json = "" unused_extensions_json = ""
for i in sorted(extensions.__located_extensions__): for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i): if ExtensionsOutput.is_marked(i):
used_extensions_json += "\"" + i + "\", " used_extensions_json += '"' + i + '", '
else: else:
unused_extensions_json += "\"" + i + "\", " unused_extensions_json += '"' + i + '", '
used_extensions_json = used_extensions_json[:-2] used_extensions_json = used_extensions_json[:-2]
unused_extensions_json = unused_extensions_json[:-2] unused_extensions_json = unused_extensions_json[:-2]
print(",\n\t\t\"extensions\": {\n" + message_json + "\t\t\t\"used\": [ " + used_extensions_json + print(
" ],\n\t\t\t\"unused\": [ " + unused_extensions_json + " ]\n" + "\t\t}", end="") ',\n\t\t"extensions": {\n'
+ message_json
+ '\t\t\t"used": [ '
+ used_extensions_json
+ ' ],\n\t\t\t"unused": [ '
+ unused_extensions_json
+ " ]\n"
+ "\t\t}",
end="",
)
def output_text(self): def output_text(self):
if extensions.__located_extensions__: if extensions.__located_extensions__:
print("\n" + textwrap.fill("{0} {1}:".format(_(EXTENSIONS_INFO_TEXT), _(EXTENSIONS_MARKED_TEXT)), print(
width=terminal.get_size()[0])) "\n"
+ textwrap.fill(
"{0} {1}:".format(_(EXTENSIONS_INFO_TEXT), _(EXTENSIONS_MARKED_TEXT)), width=terminal.get_size()[0]
)
)
for i in sorted(extensions.__located_extensions__): for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i): if ExtensionsOutput.is_marked(i):
print("[" + terminal.__bold__ + i + terminal.__normal__ + "]", end=" ") print("[" + terminal.__bold__ + i + terminal.__normal__ + "]", end=" ")
else: else:
print (i, end=" ") print(i, end=" ")
print("") print("")
def output_xml(self): def output_xml(self):
if extensions.__located_extensions__: if extensions.__located_extensions__:
message_xml = "\t\t<message>" + _(EXTENSIONS_INFO_TEXT) + "</message>\n" message_xml = "\t\t<message>" + _(EXTENSIONS_INFO_TEXT) + "</message>\n"
used_extensions_xml = "" used_extensions_xml = ""
unused_extensions_xml = "" unused_extensions_xml = ""
for i in sorted(extensions.__located_extensions__): for i in sorted(extensions.__located_extensions__):
if ExtensionsOutput.is_marked(i): if ExtensionsOutput.is_marked(i):
used_extensions_xml += "\t\t\t<extension>" + i + "</extension>\n" used_extensions_xml += "\t\t\t<extension>" + i + "</extension>\n"
else: else:
unused_extensions_xml += "\t\t\t<extension>" + i + "</extension>\n" unused_extensions_xml += "\t\t\t<extension>" + i + "</extension>\n"
print("\t<extensions>\n" + message_xml + "\t\t<used>\n" + used_extensions_xml + "\t\t</used>\n" + print(
"\t\t<unused>\n" + unused_extensions_xml + "\t\t</unused>\n" + "\t</extensions>") "\t<extensions>\n"
+ message_xml
+ "\t\t<used>\n"
+ used_extensions_xml
+ "\t\t</used>\n"
+ "\t\t<unused>\n"
+ unused_extensions_xml
+ "\t\t</unused>\n"
+ "\t</extensions>"
)
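is_marked above treats "**" in the configured extension list as a wildcard, so every located extension is reported as used. A minimal sketch of that check with made-up sets:

    marked = {"py", "**"}          # stands in for extensions.__extensions__
    located = ["md", "py", "yml"]  # stands in for extensions.__located_extensions__
    for ext in sorted(located):
        used = ext in marked or "**" in marked
        print(ext, "used" if used else "unused")  # everything is "used" because of "**"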

View File

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import textwrap import textwrap
from ..localization import N_ from ..localization import N_
from ..filtering import __filters__, has_filtered from ..filtering import __filters__, has_filtered
@ -26,96 +25,110 @@ from .. import terminal
from .outputable import Outputable from .outputable import Outputable
FILTERING_INFO_TEXT = N_("The following files were excluded from the statistics due to the specified exclusion patterns") FILTERING_INFO_TEXT = N_("The following files were excluded from the statistics due to the specified exclusion patterns")
FILTERING_AUTHOR_INFO_TEXT = N_("The following authors were excluded from the statistics due to the specified exclusion patterns") FILTERING_AUTHOR_INFO_TEXT = N_(
FILTERING_EMAIL_INFO_TEXT = N_("The authors with the following emails were excluded from the statistics due to the specified " \ "The following authors were excluded from the statistics due to the specified exclusion patterns"
"exclusion patterns") )
FILTERING_COMMIT_INFO_TEXT = N_("The following commit revisions were excluded from the statistics due to the specified " \ FILTERING_EMAIL_INFO_TEXT = N_(
"exclusion patterns") "The authors with the following emails were excluded from the statistics due to the specified " "exclusion patterns"
)
FILTERING_COMMIT_INFO_TEXT = N_(
"The following commit revisions were excluded from the statistics due to the specified " "exclusion patterns"
)
class FilteringOutput(Outputable): class FilteringOutput(Outputable):
@staticmethod @staticmethod
def __output_html_section__(info_string, filtered): def __output_html_section__(info_string, filtered):
filtering_xml = "" filtering_xml = ""
if filtered: if filtered:
filtering_xml += "<p>" + info_string + "."+ "</p>" filtering_xml += "<p>" + info_string + "." + "</p>"
for i in filtered: for i in filtered:
filtering_xml += "<p>" + i + "</p>" filtering_xml += "<p>" + i + "</p>"
return filtering_xml return filtering_xml
def output_html(self): def output_html(self):
if has_filtered(): if has_filtered():
filtering_xml = "<div><div class=\"box\">" filtering_xml = '<div><div class="box">'
FilteringOutput.__output_html_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1]) FilteringOutput.__output_html_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
FilteringOutput.__output_html_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1]) FilteringOutput.__output_html_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
FilteringOutput.__output_html_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1]) FilteringOutput.__output_html_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
FilteringOutput.__output_html_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1]) FilteringOutput.__output_html_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1])
filtering_xml += "</div></div>" filtering_xml += "</div></div>"
print(filtering_xml) print(filtering_xml)
@staticmethod @staticmethod
def __output_json_section__(info_string, filtered, container_tagname): def __output_json_section__(info_string, filtered, container_tagname):
if filtered: if filtered:
message_json = "\t\t\t\t\"message\": \"" + info_string + "\",\n" message_json = '\t\t\t\t"message": "' + info_string + '",\n'
filtering_json = "" filtering_json = ""
for i in filtered: for i in filtered:
filtering_json += "\t\t\t\t\t\"" + i + "\",\n" filtering_json += '\t\t\t\t\t"' + i + '",\n'
else: else:
filtering_json = filtering_json[:-3] filtering_json = filtering_json[:-3]
return "\n\t\t\t\"{0}\": {{\n".format(container_tagname) + message_json + \ return (
"\t\t\t\t\"entries\": [\n" + filtering_json + "\"\n\t\t\t\t]\n\t\t\t}," '\n\t\t\t"{0}": {{\n'.format(container_tagname)
+ message_json
+ '\t\t\t\t"entries": [\n'
+ filtering_json
+ '"\n\t\t\t\t]\n\t\t\t},'
)
return "" return ""
def output_json(self): def output_json(self):
if has_filtered(): if has_filtered():
output = ",\n\t\t\"filtering\": {" output = ',\n\t\t"filtering": {'
output += FilteringOutput.__output_json_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files") output += FilteringOutput.__output_json_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
output += FilteringOutput.__output_json_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors") output += FilteringOutput.__output_json_section__(
output += FilteringOutput.__output_json_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails") _(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors"
output += FilteringOutput.__output_json_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1], "revision") )
output = output[:-1] output += FilteringOutput.__output_json_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
output += "\n\t\t}" output += FilteringOutput.__output_json_section__(
print(output, end="") _(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1], "revision"
)
output = output[:-1]
output += "\n\t\t}"
print(output, end="")
@staticmethod @staticmethod
def __output_text_section__(info_string, filtered): def __output_text_section__(info_string, filtered):
if filtered: if filtered:
print("\n" + textwrap.fill(info_string + ":", width=terminal.get_size()[0])) print("\n" + textwrap.fill(info_string + ":", width=terminal.get_size()[0]))
for i in filtered: for i in filtered:
(width, _unused) = terminal.get_size() (width, _unused) = terminal.get_size()
print("...%s" % i[-width+3:] if len(i) > width else i) print("...%s" % i[-width + 3:] if len(i) > width else i)
def output_text(self): def output_text(self):
FilteringOutput.__output_text_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1]) FilteringOutput.__output_text_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
FilteringOutput.__output_text_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1]) FilteringOutput.__output_text_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
FilteringOutput.__output_text_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1]) FilteringOutput.__output_text_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
FilteringOutput.__output_text_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1]) FilteringOutput.__output_text_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1])
@staticmethod @staticmethod
def __output_xml_section__(info_string, filtered, container_tagname): def __output_xml_section__(info_string, filtered, container_tagname):
if filtered: if filtered:
message_xml = "\t\t\t<message>" + info_string + "</message>\n" message_xml = "\t\t\t<message>" + info_string + "</message>\n"
filtering_xml = "" filtering_xml = ""
for i in filtered: for i in filtered:
filtering_xml += "\t\t\t\t<entry>" + i + "</entry>\n" filtering_xml += "\t\t\t\t<entry>" + i + "</entry>\n"
print("\t\t<{0}>".format(container_tagname)) print("\t\t<{0}>".format(container_tagname))
print(message_xml + "\t\t\t<entries>\n" + filtering_xml + "\t\t\t</entries>\n") print(message_xml + "\t\t\t<entries>\n" + filtering_xml + "\t\t\t</entries>\n")
print("\t\t</{0}>".format(container_tagname)) print("\t\t</{0}>".format(container_tagname))
def output_xml(self): def output_xml(self):
if has_filtered(): if has_filtered():
print("\t<filtering>") print("\t<filtering>")
FilteringOutput.__output_xml_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files") FilteringOutput.__output_xml_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
FilteringOutput.__output_xml_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors") FilteringOutput.__output_xml_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors")
FilteringOutput.__output_xml_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails") FilteringOutput.__output_xml_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
FilteringOutput.__output_xml_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1], "revision") FilteringOutput.__output_xml_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1], "revision")
print("\t</filtering>") print("\t</filtering>")

@ -18,143 +18,168 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from ..changes import FileDiff from ..changes import FileDiff
from ..localization import N_ from ..localization import N_
from ..metrics import (__metric_eloc__, METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD) from ..metrics import __metric_eloc__, METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD
from .outputable import Outputable from .outputable import Outputable
ELOC_INFO_TEXT = N_("The following files are suspiciously big (in order of severity)") ELOC_INFO_TEXT = N_("The following files are suspiciously big (in order of severity)")
CYCLOMATIC_COMPLEXITY_TEXT = N_("The following files have an elevated cyclomatic complexity (in order of severity)") CYCLOMATIC_COMPLEXITY_TEXT = N_("The following files have an elevated cyclomatic complexity (in order of severity)")
CYCLOMATIC_COMPLEXITY_DENSITY_TEXT = N_("The following files have an elevated cyclomatic complexity density " \ CYCLOMATIC_COMPLEXITY_DENSITY_TEXT = N_(
"(in order of severity)") "The following files have an elevated cyclomatic complexity density " "(in order of severity)"
)
METRICS_MISSING_INFO_TEXT = N_("No metrics violations were found in the repository") METRICS_MISSING_INFO_TEXT = N_("No metrics violations were found in the repository")
METRICS_VIOLATION_SCORES = [[1.0, "minimal"], [1.25, "minor"], [1.5, "medium"], [2.0, "bad"], [3.0, "severe"]] METRICS_VIOLATION_SCORES = [[1.0, "minimal"], [1.25, "minor"], [1.5, "medium"], [2.0, "bad"], [3.0, "severe"]]
def __get_metrics_score__(ceiling, value): def __get_metrics_score__(ceiling, value):
for i in reversed(METRICS_VIOLATION_SCORES): for i in reversed(METRICS_VIOLATION_SCORES):
if value > ceiling * i[0]: if value > ceiling * i[0]:
return i[1] return i[1]
class MetricsOutput(Outputable): class MetricsOutput(Outputable):
def __init__(self, metrics): def __init__(self, metrics):
self.metrics = metrics self.metrics = metrics
Outputable.__init__(self) Outputable.__init__(self)
def output_text(self): def output_text(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density: if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print("\n" + _(METRICS_MISSING_INFO_TEXT) + ".") print("\n" + _(METRICS_MISSING_INFO_TEXT) + ".")
if self.metrics.eloc: if self.metrics.eloc:
print("\n" + _(ELOC_INFO_TEXT) + ":") print("\n" + _(ELOC_INFO_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True): for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
print(_("{0} ({1} estimated lines of code)").format(i[1], str(i[0]))) print(_("{0} ({1} estimated lines of code)").format(i[1], str(i[0])))
if self.metrics.cyclomatic_complexity: if self.metrics.cyclomatic_complexity:
print("\n" + _(CYCLOMATIC_COMPLEXITY_TEXT) + ":") print("\n" + _(CYCLOMATIC_COMPLEXITY_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True): for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
print(_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0]))) print(_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])))
if self.metrics.cyclomatic_complexity_density: if self.metrics.cyclomatic_complexity_density:
print("\n" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + ":") print("\n" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + ":")
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True): for i in sorted(
print(_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0])) set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True
):
print(_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]))
def output_html(self): def output_html(self):
metrics_xml = "<div><div class=\"box\" id=\"metrics\">" metrics_xml = '<div><div class="box" id="metrics">'
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density: if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
metrics_xml += "<p>" + _(METRICS_MISSING_INFO_TEXT) + ".</p>" metrics_xml += "<p>" + _(METRICS_MISSING_INFO_TEXT) + ".</p>"
if self.metrics.eloc: if self.metrics.eloc:
metrics_xml += "<div><h4>" + _(ELOC_INFO_TEXT) + ".</h4>" metrics_xml += "<div><h4>" + _(ELOC_INFO_TEXT) + ".</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True)): for num, i in enumerate(sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True)):
metrics_xml += "<div class=\"" + __get_metrics_score__(__metric_eloc__[FileDiff.get_extension(i[1])], i[0]) + \ metrics_xml += (
(" odd\">" if num % 2 == 1 else "\">") + \ '<div class="'
_("{0} ({1} estimated lines of code)").format(i[1], str(i[0])) + "</div>" + __get_metrics_score__(__metric_eloc__[FileDiff.get_extension(i[1])], i[0])
metrics_xml += "</div>" + (' odd">' if num % 2 == 1 else '">')
+ _("{0} ({1} estimated lines of code)").format(i[1], str(i[0]))
+ "</div>"
)
metrics_xml += "</div>"
if self.metrics.cyclomatic_complexity: if self.metrics.cyclomatic_complexity:
metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_TEXT) + "</h4>" metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_TEXT) + "</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True)): for num, i in enumerate(
metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, i[0]) + \ sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True)
(" odd\">" if num % 2 == 1 else "\">") + \ ):
_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])) + "</div>" metrics_xml += (
metrics_xml += "</div>" '<div class="'
+ __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, i[0])
+ (' odd">' if num % 2 == 1 else '">')
+ _("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0]))
+ "</div>"
)
metrics_xml += "</div>"
if self.metrics.cyclomatic_complexity_density: if self.metrics.cyclomatic_complexity_density:
metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + "</h4>" metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + "</h4>"
for num, i in enumerate(sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True)): for num, i in enumerate(
metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD, i[0]) + \ sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True)
(" odd\">" if num % 2 == 1 else "\">") + \ ):
_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]) + "</div>" metrics_xml += (
metrics_xml += "</div>" '<div class="'
+ __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD, i[0])
+ (' odd">' if num % 2 == 1 else '">')
+ _("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0])
+ "</div>"
)
metrics_xml += "</div>"
metrics_xml += "</div></div>" metrics_xml += "</div></div>"
print(metrics_xml) print(metrics_xml)
def output_json(self): def output_json(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density: if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print(",\n\t\t\"metrics\": {\n\t\t\t\"message\": \"" + _(METRICS_MISSING_INFO_TEXT) + "\"\n\t\t}", end="") print(',\n\t\t"metrics": {\n\t\t\t"message": "' + _(METRICS_MISSING_INFO_TEXT) + '"\n\t\t}', end="")
else: else:
eloc_json = "" eloc_json = ""
if self.metrics.eloc: if self.metrics.eloc:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True): for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
eloc_json += "{\n\t\t\t\t\"type\": \"estimated-lines-of-code\",\n" eloc_json += '{\n\t\t\t\t"type": "estimated-lines-of-code",\n'
eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n" eloc_json += '\t\t\t\t"file_name": "' + i[1] + '",\n'
eloc_json += "\t\t\t\t\"value\": " + str(i[0]) + "\n" eloc_json += '\t\t\t\t"value": ' + str(i[0]) + "\n"
eloc_json += "\t\t\t}," eloc_json += "\t\t\t},"
else: else:
if not self.metrics.cyclomatic_complexity: if not self.metrics.cyclomatic_complexity:
eloc_json = eloc_json[:-1] eloc_json = eloc_json[:-1]
if self.metrics.cyclomatic_complexity: if self.metrics.cyclomatic_complexity:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True): for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
eloc_json += "{\n\t\t\t\t\"type\": \"cyclomatic-complexity\",\n" eloc_json += '{\n\t\t\t\t"type": "cyclomatic-complexity",\n'
eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n" eloc_json += '\t\t\t\t"file_name": "' + i[1] + '",\n'
eloc_json += "\t\t\t\t\"value\": " + str(i[0]) + "\n" eloc_json += '\t\t\t\t"value": ' + str(i[0]) + "\n"
eloc_json += "\t\t\t}," eloc_json += "\t\t\t},"
else: else:
if not self.metrics.cyclomatic_complexity_density: if not self.metrics.cyclomatic_complexity_density:
eloc_json = eloc_json[:-1] eloc_json = eloc_json[:-1]
if self.metrics.cyclomatic_complexity_density: if self.metrics.cyclomatic_complexity_density:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True): for i in sorted(
eloc_json += "{\n\t\t\t\t\"type\": \"cyclomatic-complexity-density\",\n" set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True
eloc_json += "\t\t\t\t\"file_name\": \"" + i[1] + "\",\n" ):
eloc_json += "\t\t\t\t\"value\": {0:.3f}\n".format(i[0]) eloc_json += '{\n\t\t\t\t"type": "cyclomatic-complexity-density",\n'
eloc_json += "\t\t\t}," eloc_json += '\t\t\t\t"file_name": "' + i[1] + '",\n'
else: eloc_json += '\t\t\t\t"value": {0:.3f}\n'.format(i[0])
eloc_json = eloc_json[:-1] eloc_json += "\t\t\t},"
else:
eloc_json = eloc_json[:-1]
print(",\n\t\t\"metrics\": {\n\t\t\t\"violations\": [\n\t\t\t" + eloc_json + "]\n\t\t}", end="") print(',\n\t\t"metrics": {\n\t\t\t"violations": [\n\t\t\t' + eloc_json + "]\n\t\t}", end="")
def output_xml(self):
if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
print("\t<metrics>\n\t\t<message>" + _(METRICS_MISSING_INFO_TEXT) + "</message>\n\t</metrics>")
else:
eloc_xml = ""
if self.metrics.eloc: def output_xml(self):
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True): if not self.metrics.eloc and not self.metrics.cyclomatic_complexity and not self.metrics.cyclomatic_complexity_density:
eloc_xml += "\t\t\t<estimated-lines-of-code>\n" print("\t<metrics>\n\t\t<message>" + _(METRICS_MISSING_INFO_TEXT) + "</message>\n\t</metrics>")
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n" else:
eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n" eloc_xml = ""
eloc_xml += "\t\t\t</estimated-lines-of-code>\n"
if self.metrics.cyclomatic_complexity: if self.metrics.eloc:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True): for i in sorted(set([(j, i) for (i, j) in list(self.metrics.eloc.items())]), reverse=True):
eloc_xml += "\t\t\t<cyclomatic-complexity>\n" eloc_xml += "\t\t\t<estimated-lines-of-code>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n" eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n" eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
eloc_xml += "\t\t\t</cyclomatic-complexity>\n" eloc_xml += "\t\t\t</estimated-lines-of-code>\n"
if self.metrics.cyclomatic_complexity_density: if self.metrics.cyclomatic_complexity:
for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True): for i in sorted(set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity.items())]), reverse=True):
eloc_xml += "\t\t\t<cyclomatic-complexity-density>\n" eloc_xml += "\t\t\t<cyclomatic-complexity>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n" eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>{0:.3f}</value>\n".format(i[0]) eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n"
eloc_xml += "\t\t\t</cyclomatic-complexity-density>\n" eloc_xml += "\t\t\t</cyclomatic-complexity>\n"
print("\t<metrics>\n\t\t<violations>\n" + eloc_xml + "\t\t</violations>\n\t</metrics>") if self.metrics.cyclomatic_complexity_density:
for i in sorted(
set([(j, i) for (i, j) in list(self.metrics.cyclomatic_complexity_density.items())]), reverse=True
):
eloc_xml += "\t\t\t<cyclomatic-complexity-density>\n"
eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n"
eloc_xml += "\t\t\t\t<value>{0:.3f}</value>\n".format(i[0])
eloc_xml += "\t\t\t</cyclomatic-complexity-density>\n"
print("\t<metrics>\n\t\t<violations>\n" + eloc_xml + "\t\t</violations>\n\t</metrics>")

@ -18,28 +18,29 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from .. import format from .. import format
class Outputable(object): class Outputable(object):
def output_html(self): def output_html(self):
raise NotImplementedError(_("HTML output not yet supported in") + " \"" + self.__class__.__name__ + "\".") raise NotImplementedError(_("HTML output not yet supported in") + ' "' + self.__class__.__name__ + '".')
def output_json(self): def output_json(self):
raise NotImplementedError(_("JSON output not yet supported in") + " \"" + self.__class__.__name__ + "\".") raise NotImplementedError(_("JSON output not yet supported in") + ' "' + self.__class__.__name__ + '".')
def output_text(self): def output_text(self):
raise NotImplementedError(_("Text output not yet supported in") + " \"" + self.__class__.__name__ + "\".") raise NotImplementedError(_("Text output not yet supported in") + ' "' + self.__class__.__name__ + '".')
def output_xml(self):
raise NotImplementedError(_("XML output not yet supported in") + ' "' + self.__class__.__name__ + '".')
def output_xml(self):
raise NotImplementedError(_("XML output not yet supported in") + " \"" + self.__class__.__name__ + "\".")
def output(outputable): def output(outputable):
if format.get_selected() == "html" or format.get_selected() == "htmlembedded": if format.get_selected() == "html" or format.get_selected() == "htmlembedded":
outputable.output_html() outputable.output_html()
elif format.get_selected() == "json": elif format.get_selected() == "json":
outputable.output_json() outputable.output_json()
elif format.get_selected() == "text": elif format.get_selected() == "text":
outputable.output_text() outputable.output_text()
else: else:
outputable.output_xml() outputable.output_xml()
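
output() dispatches on the selected format string; a self-contained sketch of the same pattern with a hypothetical subclass (illustrative only, not gitinspector code):

    # Hypothetical, self-contained illustration of the Outputable dispatch pattern.
    class Base(object):
        def output_text(self):
            raise NotImplementedError('Text output not yet supported in "' + self.__class__.__name__ + '".')

    class HelloOutput(Base):
        def output_text(self):
            print("hello in plain text")

    def dispatch(outputable, selected):
        # Stands in for output(); the real code asks format.get_selected() instead.
        if selected == "text":
            outputable.output_text()

    dispatch(HelloOutput(), "text")  # prints "hello in plain text"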

@ -18,126 +18,130 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import textwrap import textwrap
from ..localization import N_ from ..localization import N_
from .. import format, gravatar, terminal from .. import format, gravatar, terminal
from .. import responsibilities as resp from .. import responsibilities as resp
from .outputable import Outputable from .outputable import Outputable
RESPONSIBILITIES_INFO_TEXT = N_("The following responsibilities, by author, were found in the current " RESPONSIBILITIES_INFO_TEXT = N_(
"revision of the repository (comments are excluded from the line count, " "The following responsibilities, by author, were found in the current "
"if possible)") "revision of the repository (comments are excluded from the line count, "
"if possible)"
)
MOSTLY_RESPONSIBLE_FOR_TEXT = N_("is mostly responsible for") MOSTLY_RESPONSIBLE_FOR_TEXT = N_("is mostly responsible for")
class ResponsibilitiesOutput(Outputable): class ResponsibilitiesOutput(Outputable):
def __init__(self, changes, blame): def __init__(self, changes, blame):
self.changes = changes self.changes = changes
self.blame = blame self.blame = blame
Outputable.__init__(self) Outputable.__init__(self)
def output_text(self): def output_text(self):
print("\n" + textwrap.fill(_(RESPONSIBILITIES_INFO_TEXT) + ":", width=terminal.get_size()[0])) print("\n" + textwrap.fill(_(RESPONSIBILITIES_INFO_TEXT) + ":", width=terminal.get_size()[0]))
for i in sorted(set(i[0] for i in self.blame.blames)): for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True) responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities: if responsibilities:
print("\n" + i, _(MOSTLY_RESPONSIBLE_FOR_TEXT) + ":") print("\n" + i, _(MOSTLY_RESPONSIBLE_FOR_TEXT) + ":")
for j, entry in enumerate(responsibilities): for j, entry in enumerate(responsibilities):
(width, _unused) = terminal.get_size() (width, _unused) = terminal.get_size()
width -= 7 width -= 7
print(str(entry[0]).rjust(6), end=" ") print(str(entry[0]).rjust(6), end=" ")
print("...%s" % entry[1][-width+3:] if len(entry[1]) > width else entry[1]) print("...%s" % entry[1][-width + 3:] if len(entry[1]) > width else entry[1])
if j >= 9: if j >= 9:
break break
def output_html(self): def output_html(self):
resp_xml = "<div><div class=\"box\" id=\"responsibilities\">" resp_xml = '<div><div class="box" id="responsibilities">'
resp_xml += "<p>" + _(RESPONSIBILITIES_INFO_TEXT) + ".</p>" resp_xml += "<p>" + _(RESPONSIBILITIES_INFO_TEXT) + ".</p>"
for i in sorted(set(i[0] for i in self.blame.blames)): for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True) responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities: if responsibilities:
resp_xml += "<div>" resp_xml += "<div>"
if format.get_selected() == "html": if format.get_selected() == "html":
author_email = self.changes.get_latest_email_by_author(i) author_email = self.changes.get_latest_email_by_author(i)
resp_xml += "<h3><img src=\"{0}\"/>{1} {2}</h3>".format(gravatar.get_url(author_email, size=32), resp_xml += '<h3><img src="{0}"/>{1} {2}</h3>'.format(
i, _(MOSTLY_RESPONSIBLE_FOR_TEXT)) gravatar.get_url(author_email, size=32), i, _(MOSTLY_RESPONSIBLE_FOR_TEXT)
else: )
resp_xml += "<h3>{0} {1}</h3>".format(i, _(MOSTLY_RESPONSIBLE_FOR_TEXT)) else:
resp_xml += "<h3>{0} {1}</h3>".format(i, _(MOSTLY_RESPONSIBLE_FOR_TEXT))
for j, entry in enumerate(responsibilities): for j, entry in enumerate(responsibilities):
resp_xml += "<div" + (" class=\"odd\">" if j % 2 == 1 else ">") + entry[1] + \ resp_xml += (
" (" + str(entry[0]) + " eloc)</div>" "<div" + (' class="odd">' if j % 2 == 1 else ">") + entry[1] + " (" + str(entry[0]) + " eloc)</div>"
if j >= 9: )
break if j >= 9:
break
resp_xml += "</div>" resp_xml += "</div>"
resp_xml += "</div></div>" resp_xml += "</div></div>"
print(resp_xml) print(resp_xml)
def output_json(self): def output_json(self):
message_json = "\t\t\t\"message\": \"" + _(RESPONSIBILITIES_INFO_TEXT) + "\",\n" message_json = '\t\t\t"message": "' + _(RESPONSIBILITIES_INFO_TEXT) + '",\n'
resp_json = "" resp_json = ""
for i in sorted(set(i[0] for i in self.blame.blames)): for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True) responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities: if responsibilities:
author_email = self.changes.get_latest_email_by_author(i) author_email = self.changes.get_latest_email_by_author(i)
resp_json += "{\n" resp_json += "{\n"
resp_json += "\t\t\t\t\"name\": \"" + i + "\",\n" resp_json += '\t\t\t\t"name": "' + i + '",\n'
resp_json += "\t\t\t\t\"email\": \"" + author_email + "\",\n" resp_json += '\t\t\t\t"email": "' + author_email + '",\n'
resp_json += "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n" resp_json += '\t\t\t\t"gravatar": "' + gravatar.get_url(author_email) + '",\n'
resp_json += "\t\t\t\t\"files\": [\n\t\t\t\t" resp_json += '\t\t\t\t"files": [\n\t\t\t\t'
for j, entry in enumerate(responsibilities): for j, entry in enumerate(responsibilities):
resp_json += "{\n" resp_json += "{\n"
resp_json += "\t\t\t\t\t\"name\": \"" + entry[1] + "\",\n" resp_json += '\t\t\t\t\t"name": "' + entry[1] + '",\n'
resp_json += "\t\t\t\t\t\"rows\": " + str(entry[0]) + "\n" resp_json += '\t\t\t\t\t"rows": ' + str(entry[0]) + "\n"
resp_json += "\t\t\t\t}," resp_json += "\t\t\t\t},"
if j >= 9: if j >= 9:
break break
resp_json = resp_json[:-1] resp_json = resp_json[:-1]
resp_json += "]\n\t\t\t}," resp_json += "]\n\t\t\t},"
resp_json = resp_json[:-1] resp_json = resp_json[:-1]
print(",\n\t\t\"responsibilities\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + resp_json + "]\n\t\t}", end="") print(',\n\t\t"responsibilities": {\n' + message_json + '\t\t\t"authors": [\n\t\t\t' + resp_json + "]\n\t\t}", end="")
def output_xml(self): def output_xml(self):
message_xml = "\t\t<message>" + _(RESPONSIBILITIES_INFO_TEXT) + "</message>\n" message_xml = "\t\t<message>" + _(RESPONSIBILITIES_INFO_TEXT) + "</message>\n"
resp_xml = "" resp_xml = ""
for i in sorted(set(i[0] for i in self.blame.blames)): for i in sorted(set(i[0] for i in self.blame.blames)):
responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True) responsibilities = sorted(((i[1], i[0]) for i in resp.Responsibilities.get(self.blame, i)), reverse=True)
if responsibilities: if responsibilities:
author_email = self.changes.get_latest_email_by_author(i) author_email = self.changes.get_latest_email_by_author(i)
resp_xml += "\t\t\t<author>\n" resp_xml += "\t\t\t<author>\n"
resp_xml += "\t\t\t\t<name>" + i + "</name>\n" resp_xml += "\t\t\t\t<name>" + i + "</name>\n"
resp_xml += "\t\t\t\t<email>" + author_email + "</email>\n" resp_xml += "\t\t\t\t<email>" + author_email + "</email>\n"
resp_xml += "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n" resp_xml += "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
resp_xml += "\t\t\t\t<files>\n" resp_xml += "\t\t\t\t<files>\n"
for j, entry in enumerate(responsibilities): for j, entry in enumerate(responsibilities):
resp_xml += "\t\t\t\t\t<file>\n" resp_xml += "\t\t\t\t\t<file>\n"
resp_xml += "\t\t\t\t\t\t<name>" + entry[1] + "</name>\n" resp_xml += "\t\t\t\t\t\t<name>" + entry[1] + "</name>\n"
resp_xml += "\t\t\t\t\t\t<rows>" + str(entry[0]) + "</rows>\n" resp_xml += "\t\t\t\t\t\t<rows>" + str(entry[0]) + "</rows>\n"
resp_xml += "\t\t\t\t\t</file>\n" resp_xml += "\t\t\t\t\t</file>\n"
if j >= 9: if j >= 9:
break break
resp_xml += "\t\t\t\t</files>\n" resp_xml += "\t\t\t\t</files>\n"
resp_xml += "\t\t\t</author>\n" resp_xml += "\t\t\t</author>\n"
print("\t<responsibilities>\n" + message_xml + "\t\t<authors>\n" + resp_xml + "\t\t</authors>\n\t</responsibilities>") print("\t<responsibilities>\n" + message_xml + "\t\t<authors>\n" + resp_xml + "\t\t</authors>\n\t</responsibilities>")

@ -18,7 +18,6 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import textwrap import textwrap
from ..localization import N_ from ..localization import N_
from .. import format, gravatar, terminal, timeline from .. import format, gravatar, terminal, timeline
@ -27,182 +26,195 @@ from .outputable import Outputable
TIMELINE_INFO_TEXT = N_("The following history timeline has been gathered from the repository") TIMELINE_INFO_TEXT = N_("The following history timeline has been gathered from the repository")
MODIFIED_ROWS_TEXT = N_("Modified Rows:") MODIFIED_ROWS_TEXT = N_("Modified Rows:")
def __output_row__text__(timeline_data, periods, names): def __output_row__text__(timeline_data, periods, names):
print("\n" + terminal.__bold__ + terminal.ljust(_("Author"), 20), end=" ") print("\n" + terminal.__bold__ + terminal.ljust(_("Author"), 20), end=" ")
for period in periods: for period in periods:
print(terminal.rjust(period, 10), end=" ") print(terminal.rjust(period, 10), end=" ")
print(terminal.__normal__) print(terminal.__normal__)
for name in names: for name in names:
if timeline_data.is_author_in_periods(periods, name[0]): if timeline_data.is_author_in_periods(periods, name[0]):
print(terminal.ljust(name[0], 20)[0:20 - terminal.get_excess_column_count(name[0])], end=" ") print(terminal.ljust(name[0], 20)[0:20 - terminal.get_excess_column_count(name[0])], end=" ")
for period in periods: for period in periods:
multiplier = timeline_data.get_multiplier(period, 9) multiplier = timeline_data.get_multiplier(period, 9)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier) signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "-" + signs[0] * "+") signs_str = signs[1] * "-" + signs[0] * "+"
print (("." if timeline_data.is_author_in_period(period, name[0]) and print(
len(signs_str) == 0 else signs_str).rjust(10), end=" ") ("." if timeline_data.is_author_in_period(period, name[0]) and len(signs_str) == 0 else signs_str).rjust(
print("") 10
),
end=" ",
)
print("")
print(terminal.__bold__ + terminal.ljust(_(MODIFIED_ROWS_TEXT), 20) + terminal.__normal__, end=" ") print(terminal.__bold__ + terminal.ljust(_(MODIFIED_ROWS_TEXT), 20) + terminal.__normal__, end=" ")
for period in periods: for period in periods:
total_changes = str(timeline_data.get_total_changes_in_period(period)[2]) total_changes = str(timeline_data.get_total_changes_in_period(period)[2])
if hasattr(total_changes, 'decode'): if hasattr(total_changes, "decode"):
total_changes = total_changes.decode("utf-8", "replace") total_changes = total_changes.decode("utf-8", "replace")
print(terminal.rjust(total_changes, 10), end=" ") print(terminal.rjust(total_changes, 10), end=" ")
print("")
print("")
def __output_row__html__(timeline_data, periods, names): def __output_row__html__(timeline_data, periods, names):
timeline_xml = "<table class=\"git full\"><thead><tr><th>" + _("Author") + "</th>" timeline_xml = '<table class="git full"><thead><tr><th>' + _("Author") + "</th>"
for period in periods: for period in periods:
timeline_xml += "<th>" + str(period) + "</th>" timeline_xml += "<th>" + str(period) + "</th>"
timeline_xml += "</tr></thead><tbody>" timeline_xml += "</tr></thead><tbody>"
i = 0 i = 0
for name in names: for name in names:
if timeline_data.is_author_in_periods(periods, name[0]): if timeline_data.is_author_in_periods(periods, name[0]):
timeline_xml += "<tr" + (" class=\"odd\">" if i % 2 == 1 else ">") timeline_xml += "<tr" + (' class="odd">' if i % 2 == 1 else ">")
if format.get_selected() == "html": if format.get_selected() == "html":
timeline_xml += "<td><img src=\"{0}\"/>{1}</td>".format(gravatar.get_url(name[1]), name[0]) timeline_xml += '<td><img src="{0}"/>{1}</td>'.format(gravatar.get_url(name[1]), name[0])
else: else:
timeline_xml += "<td>" + name[0] + "</td>" timeline_xml += "<td>" + name[0] + "</td>"
for period in periods: for period in periods:
multiplier = timeline_data.get_multiplier(period, 18) multiplier = timeline_data.get_multiplier(period, 18)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier) signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "<div class=\"remove\">&nbsp;</div>" + signs[0] * "<div class=\"insert\">&nbsp;</div>") signs_str = signs[1] * '<div class="remove">&nbsp;</div>' + signs[0] * '<div class="insert">&nbsp;</div>'
timeline_xml += "<td>" + ("." if timeline_data.is_author_in_period(period, name[0]) and len(signs_str) == 0 else signs_str) timeline_xml += "<td>" + (
timeline_xml += "</td>" "." if timeline_data.is_author_in_period(period, name[0]) and len(signs_str) == 0 else signs_str
timeline_xml += "</tr>" )
i = i + 1 timeline_xml += "</td>"
timeline_xml += "</tr>"
i = i + 1
timeline_xml += "<tfoot><tr><td><strong>" + _(MODIFIED_ROWS_TEXT) + "</strong></td>" timeline_xml += "<tfoot><tr><td><strong>" + _(MODIFIED_ROWS_TEXT) + "</strong></td>"
for period in periods: for period in periods:
total_changes = timeline_data.get_total_changes_in_period(period) total_changes = timeline_data.get_total_changes_in_period(period)
timeline_xml += "<td>" + str(total_changes[2]) + "</td>" timeline_xml += "<td>" + str(total_changes[2]) + "</td>"
timeline_xml += "</tr></tfoot></tbody></table>"
print(timeline_xml)
timeline_xml += "</tr></tfoot></tbody></table>"
print(timeline_xml)
class TimelineOutput(Outputable): class TimelineOutput(Outputable):
def __init__(self, changes, useweeks): def __init__(self, changes, useweeks):
self.changes = changes self.changes = changes
self.useweeks = useweeks self.useweeks = useweeks
Outputable.__init__(self) Outputable.__init__(self)
def output_text(self): def output_text(self):
if self.changes.get_commits(): if self.changes.get_commits():
print("\n" + textwrap.fill(_(TIMELINE_INFO_TEXT) + ":", width=terminal.get_size()[0])) print("\n" + textwrap.fill(_(TIMELINE_INFO_TEXT) + ":", width=terminal.get_size()[0]))
timeline_data = timeline.TimelineData(self.changes, self.useweeks) timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods() periods = timeline_data.get_periods()
names = timeline_data.get_authors() names = timeline_data.get_authors()
(width, _unused) = terminal.get_size() (width, _unused) = terminal.get_size()
max_periods_per_row = int((width - 21) / 11) max_periods_per_row = int((width - 21) / 11)
for i in range(0, len(periods), max_periods_per_row): for i in range(0, len(periods), max_periods_per_row):
__output_row__text__(timeline_data, periods[i:i+max_periods_per_row], names) __output_row__text__(timeline_data, periods[i:i + max_periods_per_row], names)
def output_html(self): def output_html(self):
if self.changes.get_commits(): if self.changes.get_commits():
timeline_data = timeline.TimelineData(self.changes, self.useweeks) timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods() periods = timeline_data.get_periods()
names = timeline_data.get_authors() names = timeline_data.get_authors()
max_periods_per_row = 8 max_periods_per_row = 8
timeline_xml = "<div><div id=\"timeline\" class=\"box\">" timeline_xml = '<div><div id="timeline" class="box">'
timeline_xml += "<p>" + _(TIMELINE_INFO_TEXT) + ".</p>" timeline_xml += "<p>" + _(TIMELINE_INFO_TEXT) + ".</p>"
print(timeline_xml) print(timeline_xml)
for i in range(0, len(periods), max_periods_per_row): for i in range(0, len(periods), max_periods_per_row):
__output_row__html__(timeline_data, periods[i:i+max_periods_per_row], names) __output_row__html__(timeline_data, periods[i:i + max_periods_per_row], names)
timeline_xml = "</div></div>" timeline_xml = "</div></div>"
print(timeline_xml) print(timeline_xml)
def output_json(self): def output_json(self):
if self.changes.get_commits(): if self.changes.get_commits():
message_json = "\t\t\t\"message\": \"" + _(TIMELINE_INFO_TEXT) + "\",\n" message_json = '\t\t\t"message": "' + _(TIMELINE_INFO_TEXT) + '",\n'
timeline_json = "" timeline_json = ""
periods_json = "\t\t\t\"period_length\": \"{0}\",\n".format("week" if self.useweeks else "month") periods_json = '\t\t\t"period_length": "{0}",\n'.format("week" if self.useweeks else "month")
periods_json += "\t\t\t\"periods\": [\n\t\t\t" periods_json += '\t\t\t"periods": [\n\t\t\t'
timeline_data = timeline.TimelineData(self.changes, self.useweeks) timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods() periods = timeline_data.get_periods()
names = timeline_data.get_authors() names = timeline_data.get_authors()
for period in periods: for period in periods:
name_json = "\t\t\t\t\"name\": \"" + str(period) + "\",\n" name_json = '\t\t\t\t"name": "' + str(period) + '",\n'
authors_json = "\t\t\t\t\"authors\": [\n\t\t\t\t" authors_json = '\t\t\t\t"authors": [\n\t\t\t\t'
for name in names: for name in names:
if timeline_data.is_author_in_period(period, name[0]): if timeline_data.is_author_in_period(period, name[0]):
multiplier = timeline_data.get_multiplier(period, 24) multiplier = timeline_data.get_multiplier(period, 24)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier) signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "-" + signs[0] * "+") signs_str = signs[1] * "-" + signs[0] * "+"
if len(signs_str) == 0: if len(signs_str) == 0:
signs_str = "." signs_str = "."
authors_json += "{\n\t\t\t\t\t\"name\": \"" + name[0] + "\",\n" authors_json += '{\n\t\t\t\t\t"name": "' + name[0] + '",\n'
authors_json += "\t\t\t\t\t\"email\": \"" + name[1] + "\",\n" authors_json += '\t\t\t\t\t"email": "' + name[1] + '",\n'
authors_json += "\t\t\t\t\t\"gravatar\": \"" + gravatar.get_url(name[1]) + "\",\n" authors_json += '\t\t\t\t\t"gravatar": "' + gravatar.get_url(name[1]) + '",\n'
authors_json += "\t\t\t\t\t\"work\": \"" + signs_str + "\"\n\t\t\t\t}," authors_json += '\t\t\t\t\t"work": "' + signs_str + '"\n\t\t\t\t},'
else: else:
authors_json = authors_json[:-1] authors_json = authors_json[:-1]
authors_json += "],\n" authors_json += "],\n"
modified_rows_json = "\t\t\t\t\"modified_rows\": " + \ modified_rows_json = (
str(timeline_data.get_total_changes_in_period(period)[2]) + "\n" '\t\t\t\t"modified_rows": ' + str(timeline_data.get_total_changes_in_period(period)[2]) + "\n"
timeline_json += "{\n" + name_json + authors_json + modified_rows_json + "\t\t\t}," )
else: timeline_json += "{\n" + name_json + authors_json + modified_rows_json + "\t\t\t},"
timeline_json = timeline_json[:-1] else:
timeline_json = timeline_json[:-1]
print(",\n\t\t\"timeline\": {\n" + message_json + periods_json + timeline_json + "]\n\t\t}", end="") print(',\n\t\t"timeline": {\n' + message_json + periods_json + timeline_json + "]\n\t\t}", end="")
def output_xml(self): def output_xml(self):
if self.changes.get_commits(): if self.changes.get_commits():
message_xml = "\t\t<message>" + _(TIMELINE_INFO_TEXT) + "</message>\n" message_xml = "\t\t<message>" + _(TIMELINE_INFO_TEXT) + "</message>\n"
timeline_xml = "" timeline_xml = ""
periods_xml = "\t\t<periods length=\"{0}\">\n".format("week" if self.useweeks else "month") periods_xml = '\t\t<periods length="{0}">\n'.format("week" if self.useweeks else "month")
timeline_data = timeline.TimelineData(self.changes, self.useweeks) timeline_data = timeline.TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods() periods = timeline_data.get_periods()
names = timeline_data.get_authors() names = timeline_data.get_authors()
for period in periods: for period in periods:
name_xml = "\t\t\t\t<name>" + str(period) + "</name>\n" name_xml = "\t\t\t\t<name>" + str(period) + "</name>\n"
authors_xml = "\t\t\t\t<authors>\n" authors_xml = "\t\t\t\t<authors>\n"
for name in names: for name in names:
if timeline_data.is_author_in_period(period, name[0]): if timeline_data.is_author_in_period(period, name[0]):
multiplier = timeline_data.get_multiplier(period, 24) multiplier = timeline_data.get_multiplier(period, 24)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier) signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "-" + signs[0] * "+") signs_str = signs[1] * "-" + signs[0] * "+"
if len(signs_str) == 0: if len(signs_str) == 0:
signs_str = "." signs_str = "."
authors_xml += "\t\t\t\t\t<author>\n\t\t\t\t\t\t<name>" + name[0] + "</name>\n" authors_xml += "\t\t\t\t\t<author>\n\t\t\t\t\t\t<name>" + name[0] + "</name>\n"
authors_xml += "\t\t\t\t\t\t<email>" + name[1] + "</email>\n" authors_xml += "\t\t\t\t\t\t<email>" + name[1] + "</email>\n"
authors_xml += "\t\t\t\t\t\t<gravatar>" + gravatar.get_url(name[1]) + "</gravatar>\n" authors_xml += "\t\t\t\t\t\t<gravatar>" + gravatar.get_url(name[1]) + "</gravatar>\n"
authors_xml += "\t\t\t\t\t\t<work>" + signs_str + "</work>\n\t\t\t\t\t</author>\n" authors_xml += "\t\t\t\t\t\t<work>" + signs_str + "</work>\n\t\t\t\t\t</author>\n"
authors_xml += "\t\t\t\t</authors>\n" authors_xml += "\t\t\t\t</authors>\n"
modified_rows_xml = "\t\t\t\t<modified_rows>" + \ modified_rows_xml = (
str(timeline_data.get_total_changes_in_period(period)[2]) + "</modified_rows>\n" "\t\t\t\t<modified_rows>"
timeline_xml += "\t\t\t<period>\n" + name_xml + authors_xml + modified_rows_xml + "\t\t\t</period>\n" + str(timeline_data.get_total_changes_in_period(period)[2])
+ "</modified_rows>\n"
)
timeline_xml += "\t\t\t<period>\n" + name_xml + authors_xml + modified_rows_xml + "\t\t\t</period>\n"
print("\t<timeline>\n" + message_xml + periods_xml + timeline_xml + "\t\t</periods>\n\t</timeline>") print("\t<timeline>\n" + message_xml + periods_xml + timeline_xml + "\t\t</periods>\n\t</timeline>")

@ -18,20 +18,19 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
class ResponsibiltyEntry(object): class ResponsibiltyEntry(object):
blames = {} blames = {}
class Responsibilities(object): class Responsibilities(object):
@staticmethod @staticmethod
def get(blame, author_name): def get(blame, author_name):
author_blames = {} author_blames = {}
for i in list(blame.blames.items()): for i in list(blame.blames.items()):
if author_name == i[0][0]: if author_name == i[0][0]:
total_rows = i[1].rows - i[1].comments total_rows = i[1].rows - i[1].comments
if total_rows > 0: if total_rows > 0:
author_blames[i[0][1]] = total_rows author_blames[i[0][1]] = total_rows
return sorted(author_blames.items()) return sorted(author_blames.items())
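
Responsibilities.get keeps, per file, an author's rows minus comments and drops files that end up at zero; a standalone example with made-up blame data:

    # Made-up blame data keyed by (author, filename); mirrors Responsibilities.get.
    blames = {
        ("Alice", "gitinspector/terminal.py"): (120, 20),  # (rows, comments)
        ("Alice", "gitinspector/timeline.py"): (30, 30),   # all comments, so it is dropped
        ("Bob", "gitinspector/terminal.py"): (10, 0),
    }

    author_blames = {}
    for (author, filename), (rows, comments) in blames.items():
        if author == "Alice":
            total_rows = rows - comments
            if total_rows > 0:
                author_blames[filename] = total_rows

    print(sorted(author_blames.items()))  # [('gitinspector/terminal.py', 100)]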

@ -29,130 +29,151 @@ __normal__ = "\033[0;0m"
DEFAULT_TERMINAL_SIZE = (80, 25) DEFAULT_TERMINAL_SIZE = (80, 25)
def __get_size_windows__(): def __get_size_windows__():
res = None res = None
try: try:
from ctypes import windll, create_string_buffer from ctypes import windll, create_string_buffer
handler = windll.kernel32.GetStdHandle(-12) # stderr handler = windll.kernel32.GetStdHandle(-12) # stderr
csbi = create_string_buffer(22) csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(handler, csbi) res = windll.kernel32.GetConsoleScreenBufferInfo(handler, csbi)
except: except:
return DEFAULT_TERMINAL_SIZE return DEFAULT_TERMINAL_SIZE
if res:
import struct
(_, _, _, _, _, left, top, right, bottom, _, _) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
else:
return DEFAULT_TERMINAL_SIZE
if res:
import struct
(_, _, _, _, _, left, top, right, bottom, _, _) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
else:
return DEFAULT_TERMINAL_SIZE
def __get_size_linux__(): def __get_size_linux__():
def ioctl_get_window_size(file_descriptor): def ioctl_get_window_size(file_descriptor):
try: try:
import fcntl, termios, struct import fcntl, termios, struct
size = struct.unpack('hh', fcntl.ioctl(file_descriptor, termios.TIOCGWINSZ, "1234"))
except:
return DEFAULT_TERMINAL_SIZE
return size size = struct.unpack("hh", fcntl.ioctl(file_descriptor, termios.TIOCGWINSZ, "1234"))
except:
return DEFAULT_TERMINAL_SIZE
size = ioctl_get_window_size(0) or ioctl_get_window_size(1) or ioctl_get_window_size(2) return size
if not size: size = ioctl_get_window_size(0) or ioctl_get_window_size(1) or ioctl_get_window_size(2)
try:
file_descriptor = os.open(os.ctermid(), os.O_RDONLY) if not size:
size = ioctl_get_window_size(file_descriptor) try:
os.close(file_descriptor) file_descriptor = os.open(os.ctermid(), os.O_RDONLY)
except: size = ioctl_get_window_size(file_descriptor)
pass os.close(file_descriptor)
if not size: except:
try: pass
size = (os.environ["LINES"], os.environ["COLUMNS"]) if not size:
except: try:
return DEFAULT_TERMINAL_SIZE size = (os.environ["LINES"], os.environ["COLUMNS"])
except:
return DEFAULT_TERMINAL_SIZE
return int(size[1]), int(size[0])
return int(size[1]), int(size[0])
def clear_row(): def clear_row():
print("\r", end="") print("\r", end="")
def skip_escapes(skip): def skip_escapes(skip):
if skip: if skip:
global __bold__ global __bold__
global __normal__ global __normal__
__bold__ = "" __bold__ = ""
__normal__ = "" __normal__ = ""
def printb(string): def printb(string):
print(__bold__ + string + __normal__) print(__bold__ + string + __normal__)
def get_size(): def get_size():
width = 0 width = 0
height = 0 height = 0
if sys.stdout.isatty(): if sys.stdout.isatty():
current_os = platform.system() current_os = platform.system()
if current_os == "Windows": if current_os == "Windows":
(width, height) = __get_size_windows__() (width, height) = __get_size_windows__()
elif current_os == "Linux" or current_os == "Darwin" or current_os.startswith("CYGWIN"): elif current_os == "Linux" or current_os == "Darwin" or current_os.startswith("CYGWIN"):
(width, height) = __get_size_linux__() (width, height) = __get_size_linux__()
if width > 0: if width > 0:
return (width, height) return (width, height)
return DEFAULT_TERMINAL_SIZE
return DEFAULT_TERMINAL_SIZE
def set_stdout_encoding(): def set_stdout_encoding():
if not sys.stdout.isatty() and sys.version_info < (3,): if not sys.stdout.isatty() and sys.version_info < (3,):
sys.stdout = codecs.getwriter("utf-8")(sys.stdout) sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
def set_stdin_encoding(): def set_stdin_encoding():
if not sys.stdin.isatty() and sys.version_info < (3,): if not sys.stdin.isatty() and sys.version_info < (3,):
sys.stdin = codecs.getreader("utf-8")(sys.stdin) sys.stdin = codecs.getreader("utf-8")(sys.stdin)
def convert_command_line_to_utf8(): def convert_command_line_to_utf8():
try: try:
argv = [] argv = []
for arg in sys.argv: for arg in sys.argv:
argv.append(arg.decode(sys.stdin.encoding, "replace")) argv.append(arg.decode(sys.stdin.encoding, "replace"))
return argv
except AttributeError:
return sys.argv
return argv
except AttributeError:
return sys.argv
def check_terminal_encoding(): def check_terminal_encoding():
if sys.stdout.isatty() and (sys.stdout.encoding == None or sys.stdin.encoding == None): if sys.stdout.isatty() and (sys.stdout.encoding is None or sys.stdin.encoding is None):
print(_("WARNING: The terminal encoding is not correctly configured. gitinspector might malfunction. " print(
"The encoding can be configured with the environment variable 'PYTHONIOENCODING'."), file=sys.stderr) _(
"WARNING: The terminal encoding is not correctly configured. gitinspector might malfunction. "
"The encoding can be configured with the environment variable 'PYTHONIOENCODING'."
),
file=sys.stderr,
)
def get_excess_column_count(string): def get_excess_column_count(string):
width_mapping = {'F': 2, 'H': 1, 'W': 2, 'Na': 1, 'N': 1, 'A': 1} width_mapping = {"F": 2, "H": 1, "W": 2, "Na": 1, "N": 1, "A": 1}
result = 0 result = 0
for i in string: for i in string:
width = unicodedata.east_asian_width(i) width = unicodedata.east_asian_width(i)
result += width_mapping[width] result += width_mapping[width]
return result - len(string)
return result - len(string)
def ljust(string, pad): def ljust(string, pad):
return string.ljust(pad - get_excess_column_count(string)) return string.ljust(pad - get_excess_column_count(string))
def rjust(string, pad): def rjust(string, pad):
return string.rjust(pad - get_excess_column_count(string)) return string.rjust(pad - get_excess_column_count(string))
def output_progress(text, pos, length): def output_progress(text, pos, length):
if sys.stdout.isatty(): if sys.stdout.isatty():
(width, _unused) = get_size() (width, _unused) = get_size()
progress_text = text.format(100 * pos / length) progress_text = text.format(100 * pos / length)
if len(progress_text) > width: if len(progress_text) > width:
progress_text = "...%s" % progress_text[-width+3:] progress_text = "...%s" % progress_text[-width + 3:]
print("\r{0}\r{1}".format(" " * width, progress_text), end="") print("\r{0}\r{1}".format(" " * width, progress_text), end="")
sys.stdout.flush() sys.stdout.flush()
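
The column handling above counts East Asian wide characters as two terminal cells; get_excess_column_count returns the extra cells beyond len(), which ljust and rjust subtract from the padding. A small standalone check:

    # Standalone check of the wide-character accounting used by ljust/rjust above.
    import unicodedata

    width_mapping = {"F": 2, "H": 1, "W": 2, "Na": 1, "N": 1, "A": 1}

    def excess_columns(string):
        return sum(width_mapping[unicodedata.east_asian_width(c)] for c in string) - len(string)

    print(excess_columns("gitinspector"))  # 0 -- plain ASCII
    print(excess_columns("日本語"))        # 3 -- each ideograph takes two columns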

@ -20,81 +20,81 @@
import datetime import datetime
class TimelineData(object): class TimelineData(object):
def __init__(self, changes, useweeks): def __init__(self, changes, useweeks):
authordateinfo_list = sorted(changes.get_authordateinfo_list().items()) authordateinfo_list = sorted(changes.get_authordateinfo_list().items())
self.changes = changes self.changes = changes
self.entries = {} self.entries = {}
self.total_changes_by_period = {} self.total_changes_by_period = {}
self.useweeks = useweeks self.useweeks = useweeks
for i in authordateinfo_list: for i in authordateinfo_list:
key = None key = None
if useweeks: if useweeks:
yearweek = datetime.date(int(i[0][0][0:4]), int(i[0][0][5:7]), int(i[0][0][8:10])).isocalendar() yearweek = datetime.date(int(i[0][0][0:4]), int(i[0][0][5:7]), int(i[0][0][8:10])).isocalendar()
key = (i[0][1], str(yearweek[0]) + "W" + "{0:02d}".format(yearweek[1])) key = (i[0][1], str(yearweek[0]) + "W" + "{0:02d}".format(yearweek[1]))
else: else:
key = (i[0][1], i[0][0][0:7]) key = (i[0][1], i[0][0][0:7])
if self.entries.get(key, None) == None: if self.entries.get(key, None) is None:
self.entries[key] = i[1] self.entries[key] = i[1]
else: else:
self.entries[key].insertions += i[1].insertions self.entries[key].insertions += i[1].insertions
self.entries[key].deletions += i[1].deletions self.entries[key].deletions += i[1].deletions
for period in self.get_periods(): for period in self.get_periods():
total_insertions = 0 total_insertions = 0
total_deletions = 0 total_deletions = 0
for author in self.get_authors(): for author in self.get_authors():
entry = self.entries.get((author[0], period), None) entry = self.entries.get((author[0], period), None)
if entry != None: if entry is not None:
total_insertions += entry.insertions total_insertions += entry.insertions
total_deletions += entry.deletions total_deletions += entry.deletions
self.total_changes_by_period[period] = (total_insertions, total_deletions, self.total_changes_by_period[period] = (total_insertions, total_deletions, total_insertions + total_deletions)
total_insertions + total_deletions)
def get_periods(self): def get_periods(self):
return sorted(set([i[1] for i in self.entries])) return sorted(set([i[1] for i in self.entries]))
def get_total_changes_in_period(self, period): def get_total_changes_in_period(self, period):
return self.total_changes_by_period[period] return self.total_changes_by_period[period]
def get_authors(self): def get_authors(self):
return sorted(set([(i[0][0], self.changes.get_latest_email_by_author(i[0][0])) for i in list(self.entries.items())])) return sorted(set([(i[0][0], self.changes.get_latest_email_by_author(i[0][0])) for i in list(self.entries.items())]))
def get_author_signs_in_period(self, author, period, multiplier): def get_author_signs_in_period(self, author, period, multiplier):
authorinfo = self.entries.get((author, period), None) authorinfo = self.entries.get((author, period), None)
total = float(self.total_changes_by_period[period][2]) total = float(self.total_changes_by_period[period][2])
if authorinfo: if authorinfo:
i = multiplier * (self.entries[(author, period)].insertions / total) i = multiplier * (self.entries[(author, period)].insertions / total)
j = multiplier * (self.entries[(author, period)].deletions / total) j = multiplier * (self.entries[(author, period)].deletions / total)
return (int(i), int(j)) return (int(i), int(j))
else: else:
return (0, 0) return (0, 0)
def get_multiplier(self, period, max_width): def get_multiplier(self, period, max_width):
multiplier = 0 multiplier = 0
while True: while True:
for i in self.entries: for i in self.entries:
entry = self.entries.get(i) entry = self.entries.get(i)
if period == i[1]: if period == i[1]:
changes_in_period = float(self.total_changes_by_period[i[1]][2]) changes_in_period = float(self.total_changes_by_period[i[1]][2])
if multiplier * (entry.insertions + entry.deletions) / changes_in_period > max_width: if multiplier * (entry.insertions + entry.deletions) / changes_in_period > max_width:
return multiplier return multiplier
multiplier += 0.25 multiplier += 0.25
def is_author_in_period(self, period, author): def is_author_in_period(self, period, author):
return self.entries.get((author, period), None) != None return self.entries.get((author, period), None) is not None
def is_author_in_periods(self, periods, author): def is_author_in_periods(self, periods, author):
for period in periods: for period in periods:
if self.is_author_in_period(period, author): if self.is_author_in_period(period, author):
return True return True
return False return False
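
get_author_signs_in_period scales an author's share of a period's changes into '+' and '-' signs using the multiplier chosen by get_multiplier; plugging made-up numbers into the same arithmetic:

    # Made-up counts; reproduces get_author_signs_in_period's scaling arithmetic.
    multiplier = 9                      # made up here; the real value comes from get_multiplier(period, max_width)
    insertions, deletions = 40, 10      # one author's changes in the period
    total = 100.0                       # all changes in the period

    signs = (int(multiplier * (insertions / total)), int(multiplier * (deletions / total)))
    signs_str = signs[1] * "-" + signs[0] * "+"
    print(signs, repr(signs_str))       # (3, 0) '+++'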

@ -18,17 +18,21 @@
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>. # along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from . import localization from . import localization
localization.init() localization.init()
__version__ = "0.5.0dev" __version__ = "0.5.0dev"
__doc__ = _("""Copyright © 2012-2015 Ejwa Software. All rights reserved. __doc__ = _(
"""Copyright © 2012-2015 Ejwa Software. All rights reserved.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>. License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
This is free software: you are free to change and redistribute it. This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law. There is NO WARRANTY, to the extent permitted by law.
Written by Adam Waldenberg.""") Written by Adam Waldenberg."""
)
def output(): def output():
print("gitinspector {0}\n".format(__version__) + __doc__) print("gitinspector {0}\n".format(__version__) + __doc__)

@ -19,29 +19,31 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import os import os
import sys
import unittest import unittest
import gitinspector.comment import gitinspector.comment
def __test_extension__(commented_file, extension): def __test_extension__(commented_file, extension):
base = os.path.dirname(os.path.realpath(__file__)) base = os.path.dirname(os.path.realpath(__file__))
tex_file = open(base + commented_file, "r") tex_file = open(base + commented_file, "r")
tex = tex_file.readlines() tex = tex_file.readlines()
tex_file.close() tex_file.close()
is_inside_comment = False is_inside_comment = False
comment_counter = 0 comment_counter = 0
for i in tex: for i in tex:
(_, is_inside_comment) = gitinspector.comment.handle_comment_block(is_inside_comment, extension, i) (_, is_inside_comment) = gitinspector.comment.handle_comment_block(is_inside_comment, extension, i)
if is_inside_comment or gitinspector.comment.is_comment(extension, i): if is_inside_comment or gitinspector.comment.is_comment(extension, i):
comment_counter += 1 comment_counter += 1
return comment_counter
return comment_counter
class TexFileTest(unittest.TestCase): class TexFileTest(unittest.TestCase):
def test(self): def test(self):
comment_counter = __test_extension__("/resources/commented_file.tex", "tex") comment_counter = __test_extension__("/resources/commented_file.tex", "tex")
self.assertEqual(comment_counter, 30) self.assertEqual(comment_counter, 30)
class CppFileTest(unittest.TestCase): class CppFileTest(unittest.TestCase):
def test(self): def test(self):