add paging to labels; make output more easily parseable

Branch: master
poikilos, 5 years ago, committed by Jacob Gustafson
commit b996372fe3

1 changed file: utilities/enissue.py (65 changed lines)

@@ -148,11 +148,13 @@ api_repo_url_fmt = "https://api.github.com/repos/{}/{}"
 repo_url = api_repo_url_fmt.format(remote_user, repo_name)
 issues_url = repo_url + "/issues"
 labels_url = repo_url + "/labels"
+page_size = 30 # The GitHub API only lists 30 issues per page.
+p = "@ "
 def get_issues():
     query_s = issues_url
     c_issues_name_fmt = "issues_page={}{}.json"
     this_page = 1
     query_part = ""
     # TODO: IF longer API query is implemented, put something in
     # query_part like "&label=Bucket_Game"
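The two added globals drive the rest of this commit: page_size names the GitHub API's 30-items-per-page limit in one place, and p = "@ " marks every status line so scripted callers can separate diagnostics from real output. A minimal consumer sketch, assuming the script is run as ./enissue.py (the invocation here is illustrative, not part of the diff):

    # Hypothetical consumer (not part of enissue.py): split the "@ "-prefixed
    # status lines from the parseable output this commit enables.
    import subprocess

    result = subprocess.run(["./enissue.py", "labels"],
                            capture_output=True, text=True)
    status_lines = []  # human-oriented diagnostics, prefixed with "@ "
    data_lines = []    # everything else
    for line in result.stdout.splitlines():
        if line.startswith("@ "):
            status_lines.append(line[2:])
        else:
            data_lines.append(line)
    print("{} status line(s), {} data line(s)".format(
        len(status_lines), len(data_lines)))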
@@ -168,28 +170,36 @@ def get_issues():
         c_issues_mtime = os.path.getmtime(c_issues_path)
         filetime = datetime.fromtimestamp(c_issues_mtime)
         if filetime > cache_delta:
-            print("Loading cache: \"{}\"".format(c_issues_path))
-            # print("Cache time limit: {}".format(max_cache_delta)
-            print("Cache expires: {}".format(filetime
+            print(p+"Loading cache: \"{}\"".format(c_issues_path))
+            # print(p+"Cache time limit: {}".format(max_cache_delta)
+            print(p+"Cache expires: {}".format(filetime
                                               + max_cache_delta))
             with open(c_issues_path) as json_file:
                 results = json.load(json_file)
-            print("The cache has {} issue(s).".format(len(results)))
+            # print(p+"The cache file has"
+            #       " {} issue(s).".format(len(results)))
+            max_issue = None
+            for issue in results:
+                issue_n = issue.get("number")
+                if issue_n is not None:
+                    if (max_issue is None) or (issue_n > max_issue):
+                        max_issue = issue_n
+            print(p+"The highest cached issue# is {}.".format(max_issue))
             return results
         else:
-            print("Cache time limit: {}".format(max_cache_delta))
-            print("The cache has expired: \"{}\"".format(c_issues_path))
+            print(p+"Cache time limit: {}".format(max_cache_delta))
+            print(p+"The cache has expired: \"{}\"".format(c_issues_path))
     else:
-        print("There is no cache for \"{}\"".format(c_issues_path))
+        print(p+"There is no cache for \"{}\"".format(c_issues_path))
     if page is not None:
         query_s = issues_url + "?page=" + str(page)
     try:
         response = request.urlopen(query_s)
     except urllib.error.HTTPError as e:
-        print("You may be able to view the issues on GitHub")
-        print("at the 'html_url', and a login may be required.")
-        print("The URL \"{}\" is not accessible, so you may have"
+        print(p+"You may be able to view the issues on GitHub")
+        print(p+"at the 'html_url', and a login may be required.")
+        print(p+"The URL \"{}\" is not accessible, so you may have"
               " exceeded the rate limit and be blocked"
               " temporarily:".format(query_s))
         print(str(e))
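Instead of printing a raw count, the cache branch now scans the cached issues for the highest issue number, a better hint of how current the cache is. The same scan as a standalone helper, assuming the cache file holds the JSON list of issue objects (each with a "number" field) that the GitHub issues API returns:

    import json

    def highest_cached_issue(c_issues_path):
        # Return the largest "number" among cached issues, or None.
        with open(c_issues_path) as json_file:
            results = json.load(json_file)
        max_issue = None
        for issue in results:
            issue_n = issue.get("number")
            if issue_n is not None:
                if (max_issue is None) or (issue_n > max_issue):
                    max_issue = issue_n
        return max_issue

The explicit loop mirrors the diff; max() over a generator with default=None would be an idiomatic one-liner, but the loop handles issues lacking a "number" key without extra ceremony.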
@@ -197,12 +207,14 @@ def get_issues():
     response_s = decode_safe(response.read())
     if not os.path.isdir(c_repo_path):
         os.makedirs(c_repo_path)
-    print("Saving issues cache: {}".format(c_issues_path))
+    print(p+"Saving issues cache: {}".format(c_issues_path))
     with open(c_issues_path, "w") as outs:
         outs.write(response_s)
     results = json.loads(response_s)
     return results
 
+# TODO: get labels another way, and make this conditional:
+# if cmd == "list":
 issues = get_issues()
 if issues is None:
     exit(0)
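For context, the freshness test that chooses between the cache branch and this download compares the cache file's mtime against a cutoff computed from max_cache_delta. A self-contained sketch; the timedelta value is an assumption, since its definition lies outside the hunks shown:

    import os
    from datetime import datetime, timedelta

    max_cache_delta = timedelta(hours=12)  # assumed; defined elsewhere
    cache_delta = datetime.now() - max_cache_delta

    def cache_is_fresh(c_issues_path):
        # Fresh if the file was modified after the cutoff.
        if not os.path.isfile(c_issues_path):
            return False
        filetime = datetime.fromtimestamp(os.path.getmtime(c_issues_path))
        return filetime > cache_delta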
@@ -211,7 +223,8 @@ labels = []
 match_all_labels_lower = []
 for s in match_all_labels:
-    # print("appending {} to match_all_labels_lower.".format(s.lower()))
+    # print(p+"appending"
+    #       " {} to match_all_labels_lower.".format(s.lower()))
     match_all_labels_lower.append(s.lower())
 
 match_count = 0
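Matching is case-insensitive on both sides: the requested labels are lowercased here, and each issue's labels are lowercased in the loop below. A sketch of the comparison, assuming (as the name match_all_labels suggests) that every requested label must be present:

    # Illustrative values; real ones come from the command line and the API.
    match_all_labels = ["Bucket_Game", "Bug"]
    match_all_labels_lower = [s.lower() for s in match_all_labels]
    this_issue_labels_lower = ["bucket_game", "enhancement"]
    matches = all(s in this_issue_labels_lower
                  for s in match_all_labels_lower)  # False: "bug" is missing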
@@ -220,6 +233,8 @@ matching_issue = None
 matching_issue_labels = None
 
+# TODO: get labels another way, and make this conditional:
+# if cmd == "list":
 for issue in issues:
     this_issue_labels_lower = []
     for label in issue["labels"]:
@@ -255,7 +270,7 @@ for issue in issues:
     if match_number == issue["number"]:
         matching_issue = issue
         matching_issue_labels = this_issue_labels_lower
-issue = None
+issue = None # prevent value from leaking
 
 
 def show_issue(issue):
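The new comment on issue = None points at a Python scoping detail: a for-loop variable stays bound after the loop finishes, so a later stray reference would silently see the last issue rather than raising an error. A tiny demonstration of the leak being prevented:

    issues = [{"number": 1}, {"number": 2}]
    for issue in issues:
        pass
    print(issue)  # {'number': 2} -- the loop variable outlives the loop
    issue = None  # after this, accidental use fails loudly instead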
@@ -270,13 +285,13 @@ def show_issue(issue):
         issue_data_bytes = response.read()
     except urllib.error.HTTPError as e:
         print(str(e))
-        print("The URL \"{}\" is not accessible, so you may have"
+        print(p+"The URL \"{}\" is not accessible, so you may have"
               " exceeded the rate limit and be blocked"
               " temporarily:".format(this_issue_json_url))
         html_url = issue.get("html_url")
-        print("You may be able to view the issue on GitHub")
-        print("at the 'html_url', and a login may be required:")
-        print("html_url: {}".format(html_url))
+        print(p+"You may be able to view the issue on GitHub")
+        print(p+"at the 'html_url', and a login may be required:")
+        print(p+"html_url: {}".format(html_url))
         return False
     issue_data_s = decode_safe(issue_data_bytes)
     issue_data = json.loads(issue_data_s)
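show_issue's error path follows the same pattern as get_issues: on the unauthenticated GitHub API, rate limiting surfaces as an HTTPError, which is reported along with the browser-friendly html_url. The shared pattern as a small helper (a sketch, not a function in the script):

    import urllib.error
    from urllib import request

    def fetch_or_explain(url):
        # Return the response bytes, or None after printing the kind of
        # hint enissue.py gives when GitHub's rate limit blocks a request.
        try:
            return request.urlopen(url).read()
        except urllib.error.HTTPError as e:
            print(str(e))
            print("The URL \"{}\" is not accessible, so you may have"
                  " exceeded the rate limit and be blocked"
                  " temporarily:".format(url))
            return None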
@@ -345,6 +360,16 @@ if cmd == "labels":
         print(label_s)
     print("")
     print("The repo has {} label(s).".format(len(labels)))
+    print("")
+    if total_count >= page_size:
+        print("The maximum issue count per page was reached.")
+        next_page = 2
+        if page is not None:
+            next_page = page + 1
+        print(" ./" + me + " labels page " + str(next_page))
+        print("to see labels on additional pages.")
 elif cmd == "list":
     print()
     if len(match_all_labels) > 0:
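The block added to the labels command is the paging half of this commit: when a full page comes back, it prints a ready-to-run command for the next page. The same logic as a standalone function; me, total_count, and page are names taken from the surrounding script, with definitions assumed:

    page_size = 30  # the GitHub API lists at most 30 items per page

    def next_page_hint(me, total_count, page):
        # Return the follow-up command, or None if the page was not full.
        if total_count < page_size:
            return None
        next_page = 2 if page is None else page + 1
        return " ./" + me + " labels page " + str(next_page)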
@@ -352,7 +377,7 @@ elif cmd == "list":
             match_count,
             " + ".join("'{}'".format(s) for s in match_all_labels)
         ))
-        if total_count >= 30:
+        if total_count >= page_size:
             print("{} searched, which is the maximum number"
                   " per page.".format(total_count))
             next_page = 2
@@ -368,7 +393,7 @@ elif cmd == "list":
               " {}.".format(match_count, page))
     else:
         print("{} issue(s) are showing.".format(match_count))
-        if match_count >= 30:
+        if total_count >= page_size:
             print("That is the maximum number per page. Type")
             next_page = 2
             if page is not None:
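Both paging checks now compare total_count against the shared page_size constant instead of a hard-coded 30 (the second previously tested match_count, which could miss a full page when a label filter was active). When a page is full, the labels command ends with a hint like the following (assuming the script file is named enissue.py):

    ./enissue.py labels page 2

The list branch presumably prints an analogous command for the next page of issues; the exact line falls outside the hunks shown.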
