import csv
import random

import requests
import time
from bs4 import BeautifulSoup
from random import randint
from scrapingbee import ScrapingBeeClient

# Author: Md Md Farid

# Use API key. For privacy purposes, the key used during testing was deleted for file submission.
# tempBee = ScrapingBeeClient(api_key='INSERT KEY HERE')
print("Only use a link of a Yell webpage with data of a single company.")
raw = input("Put link here: ")

# Random delay (3-5 s) so requests look less bot-like to the server.
time.sleep(randint(3, 5))
r = requests.get(raw)  # Request HTML content without API key.
# r = tempBee.get(raw)  # Request HTML content with API key.
print(r)

# Extract company data following Yell UK format.
soup = BeautifulSoup(r.content, 'html.parser')


def _tag_text(tag):
    """Return the stripped text of a bs4 Tag, or '' when the tag was not found.

    soup.find() returns None on a miss; writing the raw Tag (or None) into the
    CSV would store the full HTML repr instead of the scraped value.
    """
    return tag.get_text(strip=True) if tag is not None else ''


companyName = soup.find('h1', itemprop="name")
print(companyName)
companyRating = soup.find('span', class_="starRating--total")
print(companyRating)
companyNumber = soup.find('span', itemprop='telephone')
print(companyNumber)
# BUG FIX: BeautifulSoup has no `findall` method — the original line raised
# AttributeError at runtime. The correct API is `find_all`, returning a list.
companyHours = soup.find_all('span', class_='business--openHours')

# File with format for further compilation.
filename = 'yellSingleExtract.csv'

# Structure the scraped data into a format suitable for the compilation program.
# encoding='utf-8' added: scraped business names/hours may contain non-ASCII
# characters, which would raise UnicodeEncodeError on some platforms.
with open(filename, "w", newline='', encoding='utf-8') as csvfile:
    fieldnames = ["name", "rating", "phone", "hours"]
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    # Write the extracted text, not the Tag objects; join the (possibly
    # multiple) opening-hours spans into one cell.
    writer.writerow({
        fieldnames[0]: _tag_text(companyName),
        fieldnames[1]: _tag_text(companyRating),
        fieldnames[2]: _tag_text(companyNumber),
        fieldnames[3]: "; ".join(_tag_text(t) for t in companyHours),
    })