import json
import os
import uuid  # was missing in the original: uuid.uuid4() below raised NameError

import requests

# Tesco Partner Toolkit API endpoints.
auth_url = "https://toolkit.tesco.com/partner/reports/api/v1/authenticate"
download_url = "https://toolkit.tesco.com/partner/reports/api/v1/download-report"

# Username and password for authentication.
# NOTE(review): replace the placeholders with real credentials, ideally read
# from environment variables or a secrets store rather than hard-coded here.
username = "your_username"
password = "your_password"

# Random UUID sent as the bearer token on the authentication request.
# NOTE(review): sending a client-generated UUID as "Bearer" is unusual —
# confirm against the Tesco API docs that this is what the endpoint expects.
auth_token = str(uuid.uuid4())

# Authentication request payload.
data = {
    "username": username,
    "password": password,
}

# Headers for the authentication request, including the UUID token.
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {auth_token}",
}

# Authenticate and get the session token. A timeout prevents the script
# from hanging indefinitely if the service does not respond.
response = requests.post(auth_url, json=data, headers=headers, timeout=30)

# Check whether authentication was successful.
if response.status_code == 200:
    print("Authentication successful!")
    session_token = response.json().get("sessionToken")

    # Headers for the download request, using the session token.
    headers_download = {
        "Authorization": f"Bearer {session_token}",
    }

    # Pick the most recently created report from the JSON response.
    # NOTE(review): this reads "reports" from the *authentication* response;
    # most APIs return the report list from a separate listing endpoint —
    # verify that the authenticate response really includes "reports".
    latest_report = max(response.json()["reports"], key=lambda x: x["createdOn"])

    # Extract the filename and ID of the latest report.
    report_name = latest_report["reportName"]
    report_id = latest_report["id"]

    # Download the latest report.
    download_response = requests.get(
        f"{download_url}?reportId={report_id}",
        headers=headers_download,
        timeout=30,
    )

    # Check whether the download was successful.
    if download_response.status_code == 200:
        # Strip any directory components from the server-supplied name so a
        # malicious value like "../../etc/foo" cannot escape /dbfs (path
        # traversal), then save the file to DBFS under that name.
        safe_name = os.path.basename(report_name)
        with open(f"/dbfs/{safe_name}", "wb") as file:
            file.write(download_response.content)
        print(f"File '{safe_name}' downloaded and saved in DBFS.")
    else:
        print("Failed to download the file.")
else:
    print("Authentication failed. Please check your credentials.")
[text] K
Viewer
*** This page was generated with the meta tag "noindex, nofollow", either because you selected that option before saving or because the system detected the paste as spam. As a result, this page will never appear in search engines and search bots will not crawl it. There is nothing to worry about — you can still share it with anyone.
Editor
You can edit this paste and save as new: