Compare commits

...

10 Commits

9 changed files with 194 additions and 134 deletions

Dockerfile (new file, +17)

@@ -0,0 +1,17 @@
FROM python:3.9-slim-buster
# Install cron
RUN apt-get update && apt-get -y install cron
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
# Make entrypoint script executable
RUN chmod +x /app/entrypoint.sh
# Set the entrypoint
ENTRYPOINT ["/app/entrypoint.sh"]

config.py (new file, +10)

@@ -0,0 +1,10 @@
import os
from dotenv import load_dotenv
load_dotenv()
DOC_NAME = os.getenv("DOC_NAME")
MAYO_URL = os.getenv("MAYO_URL")
LOGIN = os.getenv("MAYO_LOGIN")
PASSWORD = os.getenv("MAYO_PASSWORD")
RESULT_DOC = "gitary 2025"

crontab (new file, +1)

@@ -0,0 +1 @@
0 8,13,18 * * * python /app/main.py >> /var/log/cron.log 2>&1
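For reference, the schedule 0 8,13,18 * * * runs /app/main.py at 08:00, 13:00, and 18:00 every day, appending stdout and stderr to /var/log/cron.log.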

docker-compose.yml (new file, +7)

@@ -0,0 +1,7 @@
version: '3.8'
services:
  gsheet-bot:
    build: .
    restart: unless-stopped
    volumes:
      - ./credentials.json:/app/credentials.json:ro
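A hedged note: credentials.json is mounted read-only, presumably for the Google Sheets service account used by GSheetAPI. The variables read in config.py (DOC_NAME, MAYO_URL, MAYO_LOGIN, MAYO_PASSWORD) are loaded via python-dotenv, so a .env file presumably has to be present in the build context (picked up by COPY . .) or otherwise provided to the container.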

entrypoint.sh (new file, +11)

@@ -0,0 +1,11 @@
#!/bin/bash
# Load the cron job
crontab /app/crontab
# Create the log file and set permissions
touch /var/log/cron.log
chmod 0666 /var/log/cron.log
# Start cron in the foreground
cron -f
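Note: cron -f keeps cron in the foreground, so the entrypoint remains the container's main process and the restart: unless-stopped policy in docker-compose.yml can supervise it.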

main.py (modified, 144 lines changed)

@@ -1,150 +1,26 @@
 from gsheet_api import GSheetAPI
 from mayo import MayoSession
-from dotenv import load_dotenv
-import os
-import re
-load_dotenv()
-# --- konfiguracja ---
-DOC_NAME = os.getenv("DOC_NAME")
-MAYO_URL = os.getenv("MAYO_URL")
-LOGIN = os.getenv("MAYO_LOGIN")
-PASSWORD = os.getenv("MAYO_PASSWORD")
-RESULT_DOC = "gitary 2025"
-def normalize(text):
-    if not text:
-        return ""
-    return re.sub(r"\s+", "", text)
-def get_finish_suffix(color):
-    """Extracts the finish suffix (e.g., 'G', 'S', 'M') from a color string."""
-    if not color:
-        return None
-    return color.strip().split('-')[-1].upper()
-def get_finish_type(row_data):
-    """Determines the finish type (GLOSS, SATIN, MAT, MIX) based on color suffixes."""
-    try:
-        top_suffix = get_finish_suffix(row_data.get("color_top"))
-        body_suffix = get_finish_suffix(row_data.get("color_body"))
-        if not top_suffix and not body_suffix:
-            return None
-        # If one suffix is missing, assume it's the same as the other.
-        top_suffix = top_suffix or body_suffix
-        body_suffix = body_suffix or top_suffix
-        suffix_to_category = {
-            'G': 'gloss',
-            'S': 'satin',
-            'M': 'mat',
-            'R': 'mat',
-            'MAT': 'mat',
-            'RAW': 'mat',
-        }
-        top_category = suffix_to_category.get(top_suffix)
-        body_category = suffix_to_category.get(body_suffix)
-        if not top_category or not body_category:
-            return None  # Suffix not in our map
-        if top_category == body_category:
-            return top_category.upper()
-        if 'mat' in {top_category, body_category}:
-            return 'MIX'
-    except (KeyError, AttributeError):
-        # This will catch if row_data is not a dict or keys are missing
-        return None
-    return None
+from config import MAYO_URL, LOGIN, PASSWORD
+from workflow import select_sheet, get_sheet_data, save_results
+from processing import process_all_rows
 def main():
     # Inicjalizuj API raz na początku
     gsheet_api = GSheetAPI()
-    print("📄 Pobieram listę arkuszy...")
-    try:
-        sheets = gsheet_api.list_sheets(DOC_NAME)
-        for i, name in enumerate(sheets):
-            print(f"{i+1}. {name}")
-    except Exception as e:
-        print(f"❌ Błąd podczas pobierania listy arkuszy: {e}")
+    sheet_name = select_sheet()
+    if not sheet_name:
+        return
-    sheet_name = input("\nWybierz arkusz do przetworzenia: ")
-    print(f"📋 Pobieram dane z arkusza: {sheet_name}")
-    try:
-        rows = gsheet_api.get_sheet_data(DOC_NAME, sheet_name)
-    except Exception as e:
-        print(f"❌ Błąd podczas pobierania danych z arkusza: {e}")
+    rows = get_sheet_data(gsheet_api, sheet_name)
+    if rows is None:
+        return
     mayo = MayoSession(MAYO_URL, LOGIN, PASSWORD)
     mayo.login()
-    rows_to_process = []
-    counter = 1
-    # Zakładamy: kolumna B = link, kolumna C = nr zam.
-    for row in rows[1:]:
-        if len(row) < 3:
-            continue  # Pomiń wiersze, które nie mają wystarczającej liczby kolumn
+    processed_rows = process_all_rows(rows, mayo)
-        link = row[1]
-        nr_zam = row[2]
-        if not link:
-            continue
-        print(f"\n🔗 Sprawdzam: {link}")
-        try:
-            info = mayo.get_order_info(link)
-            order_number = info["order_number"]
-            model = info["model"]
-            print(f"Nr z arkusza: {nr_zam}")
-            print(f"Nr ze strony: {order_number}")
-            print(f"Model: {model}")
-            if normalize(order_number) == normalize(nr_zam):
-                print("✅ Numer się zgadza")
-            else:
-                print("⚠️ Numer NIE pasuje!")
-            row_data = [
-                counter,
-                link,
-                nr_zam,
-                model,
-                get_finish_type(info),
-                info.get("color_top"),
-                info.get("color_body"),
-                info.get("color_neck"),
-                info.get("color_head"),
-                info.get("finish_kc"),
-                info.get("finish_s"),
-            ]
-            print(f"raw_data: {row_data}")
-            rows_to_process.append(row_data)
-            counter += 1
-        except Exception as e:
-            print(f"❌ Błąd podczas przetwarzania linku {link}: {e}")
-    # Po zakończeniu pętli, dodaj wszystkie zebrane wiersze za jednym razem
-    if rows_to_process:
-        print(f"\n\n--- Podsumowanie ---")
-        print(f"Zebrano {len(rows_to_process)} wierszy do przetworzenia.")
-        gsheet_api.batch_append_unique_rows(RESULT_DOC, sheet_name, rows_to_process)
-    else:
-        print("\nNie zebrano żadnych danych do przetworzenia.")
+    save_results(gsheet_api, sheet_name, processed_rows)
 if __name__ == "__main__":
     main()

processing.py (new file, +111)

@@ -0,0 +1,111 @@
import re

def normalize(text):
    if not text:
        return ""
    return re.sub(r"\s+", "", text)

def get_finish_suffix(color):
    """Extracts the finish suffix (e.g., 'G', 'S', 'M') from a color string."""
    if not color:
        return None
    return color.strip().split('-')[-1].upper()

def get_finish_type(row_data):
    """Determines the finish type (GLOSS, SATIN, MAT, MIX) based on color suffixes."""
    try:
        top_suffix = get_finish_suffix(row_data.get("color_top"))
        body_suffix = get_finish_suffix(row_data.get("color_body"))
        if not top_suffix and not body_suffix:
            return None
        # If one suffix is missing, assume it's the same as the other.
        top_suffix = top_suffix or body_suffix
        body_suffix = body_suffix or top_suffix
        suffix_to_category = {
            'G': 'gloss',
            'S': 'satin',
            'M': 'mat',
            'R': 'mat',
            'MAT': 'mat',
            'RAW': 'mat',
        }
        top_category = suffix_to_category.get(top_suffix)
        body_category = suffix_to_category.get(body_suffix)
        if not top_category or not body_category:
            return None  # Suffix not in our map
        if top_category == body_category:
            return top_category.upper()
        if 'mat' in {top_category, body_category}:
            return 'MIX'
    except (KeyError, AttributeError):
        # This will catch if row_data is not a dict or keys are missing
        return None
    return None

def process_row(row, mayo, counter):
    """Processes a single row from the sheet."""
    if len(row) < 3:
        return None  # Skip rows with insufficient columns
    link = row[1]
    nr_zam = row[2]
    if not link:
        return None
    print(f"\n🔗 Sprawdzam: {link}")
    try:
        info = mayo.get_order_info(link)
        order_number = info["order_number"]
        model = info["model"]
        print(f"Nr z arkusza: {nr_zam}")
        print(f"Nr ze strony: {order_number}")
        print(f"Model: {model}")
        if normalize(order_number) == normalize(nr_zam):
            print("✅ Numer się zgadza")
        else:
            print("⚠️ Numer NIE pasuje!")
        row_data = [
            counter,
            link,
            nr_zam,
            model,
            get_finish_type(info),
            info.get("color_top"),
            info.get("color_body"),
            info.get("color_neck"),
            info.get("color_head"),
            info.get("finish_kc"),
            info.get("finish_s"),
        ]
        print(f"raw_data: {row_data}")
        return row_data
    except Exception as e:
        print(f"❌ Błąd podczas przetwarzania linku {link}: {e}")
        return None

def process_all_rows(rows, mayo):
    """Processes all rows from the sheet."""
    rows_to_process = []
    counter = 1
    # Skip header row by starting from index 1
    for row in rows[1:]:
        processed_row = process_row(row, mayo, counter)
        if processed_row:
            rows_to_process.append(processed_row)
            counter += 1
    return rows_to_process
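
A minimal usage sketch of the new get_finish_type helper; the color codes below are hypothetical, real values come from MayoSession.get_order_info():

from processing import get_finish_type

# Same finish suffix on top and body -> that finish.
print(get_finish_type({"color_top": "BLK-G", "color_body": "BLK-G"}))  # GLOSS
# A mat/raw suffix combined with a different finish -> MIX.
print(get_finish_type({"color_top": "SUN-S", "color_body": "NAT-M"}))  # MIX
# A missing side inherits the other side's suffix.
print(get_finish_type({"color_top": "WHT-S", "color_body": None}))     # SATIN
# Suffixes outside the map are not classified.
print(get_finish_type({"color_top": "XYZ-Q", "color_body": "XYZ-Q"}))  # None
# Note: a gloss/satin combination also falls through to None, not MIX.
print(get_finish_type({"color_top": "BLK-G", "color_body": "BLK-S"}))  # None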

workflow.py (new file, +27)

@@ -0,0 +1,27 @@
import datetime
from config import DOC_NAME, RESULT_DOC

def select_sheet():
    """Generates the sheet name based on the current month and year (MM.YYYY)."""
    now = datetime.datetime.now()
    sheet_name = now.strftime("%m.%Y")
    print(f"📄 Automatycznie wybrano arkusz: {sheet_name}")
    return sheet_name

def get_sheet_data(gsheet_api, sheet_name):
    """Fetches all data from a given sheet."""
    print(f"📋 Pobieram dane z arkusza: {sheet_name}")
    try:
        return gsheet_api.get_sheet_data(DOC_NAME, sheet_name)
    except Exception as e:
        print(f"❌ Błąd podczas pobierania danych z arkusza: {e}")
        return None

def save_results(gsheet_api, sheet_name, processed_rows):
    """Saves the processed rows to the spreadsheet."""
    if processed_rows:
        print(f"\n\n--- Podsumowanie ---")
        print(f"Zebrano {len(processed_rows)} wierszy do przetworzenia.")
        gsheet_api.batch_append_unique_rows(RESULT_DOC, sheet_name, processed_rows)
    else:
        print("\nNie zebrano żadnych danych do przetworzenia.")
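
For reference, select_sheet derives the sheet name from the run date (for example, a run in August 2025 targets the sheet "08.2025"), which replaces the interactive sheet prompt removed from main.py.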