- import requests
- from bs4 import BeautifulSoup
- import csv
def get_username_and_password():
    """Prompt the user for their Waitrose Connect credentials.

    Returns:
        tuple[str, str]: ``(username, password)``. The password prompt does
        not echo the typed characters to the terminal.
    """
    # Local import keeps this fix self-contained; getpass is stdlib.
    from getpass import getpass

    username = input("Enter your username: ")
    # getpass suppresses terminal echo so the password is not shown on screen.
    password = getpass("Enter your password: ")
    return username, password
def login_and_get_week_ending_dates(username, password):
    """Log in to Waitrose Connect and scrape the available week-ending dates.

    Args:
        username: Waitrose Connect account name.
        password: Account password.

    Returns:
        list[str]: the text of every non-empty link on the weekly-data page
        (excluding the "Page Help" link), or ``[]`` if the login or page
        fetch fails.
    """
    login_url = "http://www.waitroseconnect.co.uk/names.nsf?Login"
    # NOTE(security): credentials are sent both as basic auth and inside a
    # plain-HTTP query string, so they travel unencrypted and may be logged
    # by proxies. The site appears to require this format — confirm whether
    # an HTTPS endpoint exists before changing it.
    link = (
        "http://www.waitroseconnect.co.uk/waitroseconnect/reference/"
        "WaitroseConnectSalesV2.nsf/WeeklyDataDocsViewForm?OpenForm="
        "&DBPath=domcfg.nsf&Server_Name=www.waitroseconnect.co.uk"
        f"&Username={username}&Password={password}"
    )
    try:
        # Context manager guarantees the session (and its sockets) is closed.
        with requests.Session() as session:
            session.auth = (username, password)
            login_response = session.post(
                login_url, data={'username': username, 'password': password}
            )
            login_response.raise_for_status()
            response = session.get(link)
            # Without this check a 401/500 error page would be scraped as if
            # it were the real listing.
            response.raise_for_status()
    except requests.RequestException as e:
        print(f"Error occurred during login and fetching week ending dates: {e}")
        return []

    soup = BeautifulSoup(response.content, 'html.parser')
    week_ending_dates = []
    for anchor in soup.find_all("a", href=True):
        text = anchor.get_text().strip()
        # Skip empty links and the site's "Page Help" navigation link.
        if text and text != "Page Help":
            week_ending_dates.append(text)
    return week_ending_dates
- # The rest of the code remains the same
def main():
    """Interactive driver: log in, let the user pick a week-ending date,
    then download and save that week's sales data as a CSV file."""
    try:
        username, password = get_username_and_password()
        week_ending_dates = login_and_get_week_ending_dates(username, password)
        if not week_ending_dates:
            print("Failed to fetch week ending dates. Exiting...")
            return

        # Display the dates newest-first, numbered from 1.
        print("Select a Week Ending Date:")
        for i, date in enumerate(week_ending_dates[::-1], start=1):
            print(f"{i}. {date}")

        # Validate the selection explicitly instead of letting a ValueError
        # or IndexError fall through to the generic handler below.
        try:
            selected_index = int(input(
                "Please enter the number corresponding to your desired week ending date: "
            ))
        except ValueError:
            print("Invalid input: please enter a whole number. Exiting...")
            return
        if not 1 <= selected_index <= len(week_ending_dates):
            print("Selection out of range. Exiting...")
            return

        # Map the 1-based position in the reversed listing back into the
        # original (un-reversed) list.
        selected_date = week_ending_dates[len(week_ending_dates) - selected_index]
        print(f"You selected: {selected_date}")

        corresponding_week_number = get_corresponding_week_number(selected_date, week_ending_dates)
        if corresponding_week_number == -1:
            print("Failed to find corresponding week number. Exiting...")
            return
        print(f"Corresponding number for {selected_date}: {corresponding_week_number}")

        url = construct_url_for_week_ending_date(selected_date, corresponding_week_number)
        if not url:
            print("Failed to construct URL. Exiting...")
            return

        # The fetch serves as a reachability check before downloading;
        # its body is not used further.
        response = get_response_from_url(url)
        if not response:
            print("Failed to fetch CSV data. Exiting...")
            return

        # Assumes the date is formatted DD/MM/YYYY, so the third component
        # is the year — TODO confirm against the site's actual format.
        weekendyear = selected_date.split("/")[2]
        csvfile = download_csv_sales_data(weekendyear, corresponding_week_number)
        if csvfile:
            entries = [line.split(",") for line in csvfile.strip().split("\n")]
            filename = (
                "WebLookupWeeklyLineBranchSalesData135186"
                f"{weekendyear}{corresponding_week_number}.csv"
            )
            save_to_csv(entries, filename)
        else:
            print("Failed to download the file.")
    except Exception as e:
        # Top-level boundary: report anything unexpected instead of crashing.
        print(f"An error occurred: {e}")
# Run the interactive workflow only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()
[text] S
Viewer
*** This page was generated with the meta tag "noindex, nofollow", either because you selected that option before saving or because the system flagged the paste as spam. As a result, search engines will never index this page and their crawlers will not visit it. There is nothing to worry about — you can still share the page with anyone.
Editor
You can edit this paste and save as new: