Last active
September 7, 2023 06:19
-
-
Save gerald-kim/3a1bd709dc49d53d66c7b720a3bdbe62 to your computer and use it in GitHub Desktop.
프로젝트 종료일 예측
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import warnings

warnings.filterwarnings("ignore")

# --- Configuration ---------------------------------------------------------
USERNAME = ""            # Jira username (usually your Atlassian account email).
API_TOKEN = ""           # Jira API token.
PROJECT = ""             # Jira project key.
DOMAIN = ""              # Jira Cloud domain, i.e. "<DOMAIN>.atlassian.net".
ISSUE_GROW_FACTOR = 1.2  # Inflate the open backlog to account for scope growth.
NUM_SIMULATIONS = 1000   # Monte Carlo iterations.


def fetch_issues():
    """Fetch project issues that are unresolved or resolved in the last 28 days.

    Returns a flattened DataFrame containing at least the
    'fields.resolutiondate' column (null for unresolved issues).
    Raises requests.HTTPError on an unsuccessful response (e.g. bad auth).
    """
    # Imported locally so the simulation functions below can be used
    # without the third-party `requests` dependency installed.
    import requests
    from requests.auth import HTTPBasicAuth

    url = f"https://{DOMAIN}.atlassian.net/rest/api/3/search"
    headers = {"Accept": "application/json"}
    query = {
        "jql": f"project = '{PROJECT}' AND (resolution = Unresolved or resolved > startOfDay(-28d))",
        "maxResults": 1000,  # Maximum issues per request. Modify as per your needs.
        "fields": ["key", "resolutiondate"],
    }
    response = requests.get(
        url, headers=headers, params=query, auth=HTTPBasicAuth(USERNAME, API_TOKEN)
    )
    # Fail loudly on auth/permission problems instead of a KeyError below.
    response.raise_for_status()
    return pd.json_normalize(response.json()["issues"])


def simulate_completion_weeks(remaining_tasks, throughput, num_simulations=NUM_SIMULATIONS):
    """Monte Carlo forecast: repeatedly draw a historical weekly throughput
    until the backlog is exhausted.

    remaining_tasks: number of tasks left (may be a float after inflation).
    throughput:      sequence of tasks-completed-per-week samples to draw from.
    Returns a list with one simulated week-count per simulation.
    Raises ValueError when `throughput` has no positive sample, since the
    simulation could then never terminate.
    """
    throughput = np.asarray(throughput)
    if throughput.size == 0 or throughput.max() <= 0:
        raise ValueError(
            "Need at least one week with completed tasks to run the simulation."
        )
    results = []
    for _ in range(num_simulations):
        weeks = 0
        backlog = remaining_tasks
        while backlog > 0:
            backlog -= np.random.choice(throughput)
            weeks += 1
        results.append(weeks)
    return results


def main():
    """Forecast the project end date and print its cumulative distribution."""
    issues = fetch_issues()
    issues["fields.resolutiondate"] = pd.to_datetime(issues["fields.resolutiondate"])

    # Open backlog = unresolved issues, inflated for expected scope growth.
    remaining_tasks = issues["fields.resolutiondate"].isna().sum() * ISSUE_GROW_FACTOR

    # Historical throughput: issues resolved per calendar week.  Grouping by
    # the week period silently drops unresolved (NaT) rows, so every sample
    # represents a week in which at least one issue was completed.
    weeks = issues["fields.resolutiondate"].dt.to_period("W")
    throughput = issues.groupby(weeks).size().values

    results = simulate_completion_weeks(remaining_tasks, throughput)

    # Translate week counts into projected calendar dates and print the
    # cumulative percentage per date (a textual CDF).
    end_dates = [datetime.now() + timedelta(weeks=w) for w in results]
    dates_df = pd.DataFrame(end_dates, columns=["Date"])
    cumulative = dates_df["Date"].dt.date.value_counts().sort_index().cumsum()
    cumulative = cumulative / len(end_dates) * 100
    # .items() — Series.iteritems() was removed in pandas 2.0.
    for date, percentage in cumulative.items():
        print(f"{date},{percentage}")


if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
This will print a cumulative histogram of the project's estimated completion date.