#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------# Import Modules
import asana
from asana.rest import ApiException
import pprint
from pprint import pprint
import csv
import pandas as pd
import subprocess
import time
import os
import signal
from csv_remapper import parse_key_value_csv

#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------# Set Variables
# Per-dataset configuration: the three lists below are parallel — index d in
# each refers to the same dataset.
datasets = ["assemjobboard", "augersbuckets", "pickuplist", "optionlist"]  # base names for the per-dataset CSV files
projectid = ["1198183944538650", "1212561201738344", "1212561201738344", "1212561201738344"]  # Asana project GID for each dataset
completedsince = ["now", "now", "now", "2024-01-01T00:00:00.000Z"]  # completed-since cutoff passed to the Asana API per dataset
wait_time = 20
d = 0  # dataset counter
i = 0  # line counter

#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------# Define your identifier arrays
# Map each Asana custom-field GID (or built-in key prefix) to the column label
# used in the formatted output. Declared as one dict so identifier and label
# can never drift out of alignment; the parallel lists are derived from it.
mapping = {
    "1200319923593372": "status",
    "due_on:": "due_date",
    "1206595535112943": "assigned_to",
    "name:": "name",
    "1212497675921917": "size",
    "1212561213046804": "header",
    "1212561213387952": "bucketstat",       # bucket status
    "1212561213387966": "expectedfinish",   # expected finish
    "1212561213387972": "augerstat",        # auger status
    "1212552574389678": "bucketstyle",      # bucket style
    "1212561213050443": "augerdue",         # auger due
    "1212561213050449": "bucketdue",        # bucket due
}

# Kept for compatibility with any code that reads the parallel lists directly.
identifiers = list(mapping)
labels = list(mapping.values())

#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------# Configure Asana personal access token
# SECURITY: a personal access token was hard-coded in this file. Prefer the
# ASANA_ACCESS_TOKEN environment variable; the embedded value is retained only
# as a backward-compatible fallback and should be revoked/rotated, then removed.
configuration = asana.Configuration()
configuration.access_token = os.environ.get(
    'ASANA_ACCESS_TOKEN',
    '2/1200204561560317/1207682396109602:dea9a953e8bd2cd87339c43e12429df1',
)
api_client = asana.ApiClient(configuration)

#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------# Loop to extract data for all projects seperatly
# For each dataset: fetch tasks from its Asana project, dump them to
# <dataset>rawdata.csv, strip the dict/list punctuation into
# <dataset>cleandata.csv, then remap key/value pairs into
# <dataset>formatteddata.csv via csv_remapper.parse_key_value_csv.
for d, dataset in enumerate(datasets):

    #--------------------------------------------------------------------------------------------------------------------------------------------------------------------------# Fetch tasks and write Raw Data CSV
    raw_file = dataset + "rawdata.csv"
    # "w" truncates on open, so this leaves an empty file even if the API call below fails.
    open(raw_file, "w").close()

    tasks_api_instance = asana.TasksApi(api_client)
    opts = {
        'completed_since': completedsince[d],  # only tasks that are incomplete or completed since this time ("now" or a date-time string)
        'opt_fields': "name,custom_fields.display_value,due_on",  # optional properties to include in the compact task resource
    }

    try:
        api_response = tasks_api_instance.get_tasks_for_project(projectid[d], opts)
        # Open the output once and stream one row per task (previously the
        # file was re-opened in append mode for every single task).
        with open(raw_file, 'a', newline='') as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=',')
            for data in api_response:
                csvwriter.writerow([data])
    except ApiException as e:
        print("Exception when calling TasksApi->get_tasks_for_project: %s\n" % e)

    #--------------------------------------------------------------------------------------------------------------------------------------------------------------------------# Clean garbage from raw data and write to Clean Data CSV
    clean_file = dataset + 'cleandata.csv'
    df = pd.read_csv(raw_file, names=[""])
    # Strip the braces, brackets and quotes left over from the stringified
    # task dicts (single character-class pass, raw string avoids the invalid
    # escape-sequence warnings of the old '\[' / '\]' patterns).
    df = df.replace(r'[{}\[\]\'"]', '', regex=True)
    df = df.replace("gid: ", '', regex=True)
    df = df.replace("custom_fields: ", '', regex=True)
    df = df.replace("display_value: ", '', regex=True)
    # Insert a comma after the built-in keys so the split below isolates their values.
    df = df.replace("due_on:", 'due_on:,', regex=True)
    df = df.replace("name:", 'name:,', regex=True)
    df = df[""].str.split(',', expand=True)
    df.to_csv(clean_file, index=False)  # to_csv truncates, so no separate pre-clear is needed

    #--------------------------------------------------------------------------------------------------------------------------------------------------------------------------# Populate Formatted Data CSV
    formatted_file = dataset + "formatteddata.csv"
    # Pre-clear kept: csv_remapper's write mode is not visible here — TODO confirm it truncates.
    open(formatted_file, "w").close()
    parse_key_value_csv(clean_file, formatted_file, mapping)