HomeHarvest/homeharvest/__init__.py

import pandas as pd
from typing import Union
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor

from .core.scrapers import ScraperInput
from .core.scrapers.redfin import RedfinScraper
from .core.scrapers.realtor import RealtorScraper
from .core.scrapers.zillow import ZillowScraper
from .core.scrapers.models import ListingType, Property, SiteName
from .exceptions import InvalidSite, InvalidListingType

# map of supported site names to their scraper implementations
_scrapers = {
    "redfin": RedfinScraper,
    "realtor.com": RealtorScraper,
    "zillow": ZillowScraper,
}


def _validate_input(site_name: str, listing_type: str) -> None:
    if site_name.lower() not in _scrapers:
        raise InvalidSite(f"Provided site, '{site_name}', does not exist.")

    if listing_type.upper() not in ListingType.__members__:
        raise InvalidListingType(f"Provided listing type, '{listing_type}', does not exist.")


def _get_ordered_properties(result: Property) -> list[str]:
    return [
        "property_url",
        "site_name",
        "listing_type",
        "property_type",
        "status_text",
        "baths_min",
        "baths_max",
        "beds_min",
        "beds_max",
        "sqft_min",
        "sqft_max",
        "price_min",
        "price_max",
        "unit_count",
        "tax_assessed_value",
        "price_per_sqft",
        "lot_area_value",
        "lot_area_unit",
        "address_one",
        "address_two",
        "city",
        "state",
        "zip_code",
        "posted_time",
        "area_min",
        "bldg_name",
        "stories",
        "year_built",
        "agent_name",
        "agent_phone",
        "agent_email",
        "days_on_market",
        "sold_date",
        "mls_id",
        "img_src",
        "latitude",
        "longitude",
        "description",
    ]


def _process_result(result: Property) -> pd.DataFrame:
    """
    Convert a single Property result into a one-row DataFrame with flattened columns.
    """
    prop_data = result.__dict__

    prop_data["site_name"] = prop_data["site_name"].value
    prop_data["listing_type"] = prop_data["listing_type"].value.lower()
    if "property_type" in prop_data and prop_data["property_type"] is not None:
        prop_data["property_type"] = prop_data["property_type"].value.lower()
    else:
        prop_data["property_type"] = None

    # flatten the nested address object into individual columns
    if "address" in prop_data:
        address_data = prop_data["address"]
        prop_data["address_one"] = address_data.address_one
        prop_data["address_two"] = address_data.address_two
        prop_data["city"] = address_data.city
        prop_data["state"] = address_data.state
        prop_data["zip_code"] = address_data.zip_code

        del prop_data["address"]

    # flatten the nested agent object into individual columns
    if "agent" in prop_data and prop_data["agent"] is not None:
        agent_data = prop_data["agent"]
        prop_data["agent_name"] = agent_data.name
        prop_data["agent_phone"] = agent_data.phone
        prop_data["agent_email"] = agent_data.email
        del prop_data["agent"]
    else:
        prop_data["agent_name"] = None
        prop_data["agent_phone"] = None
        prop_data["agent_email"] = None

    properties_df = pd.DataFrame([prop_data])
    properties_df = properties_df[_get_ordered_properties(result)]

    return properties_df


def _scrape_single_site(location: str, site_name: str, listing_type: str, proxy: str = None) -> pd.DataFrame:
    """
    Helper function to scrape a single site and return its results as a DataFrame.
    """
    _validate_input(site_name, listing_type)

    scraper_input = ScraperInput(
        location=location,
        listing_type=ListingType[listing_type.upper()],
        site_name=SiteName.get_by_value(site_name.lower()),
        proxy=proxy,
    )

    site = _scrapers[site_name.lower()](scraper_input)
    results = site.search()

    properties_dfs = [_process_result(result) for result in results]
    # drop columns that are entirely empty so they don't pollute the final concat
    properties_dfs = [df.dropna(axis=1, how="all") for df in properties_dfs if not df.empty]

    if not properties_dfs:
        return pd.DataFrame()

    return pd.concat(properties_dfs, ignore_index=True)


def scrape_property(
    location: str,
    site_name: Union[str, list[str]] = None,
    listing_type: str = "for_sale",
    proxy: str = None,
    keep_duplicates: bool = False,
) -> pd.DataFrame:
    """
    Scrape properties from various sites for a given location and listing type.

    :param location: US location (e.g. 'San Francisco, CA', 'Cook County, IL', '85281', '2530 Al Lipscomb Way')
    :param site_name: Site name or list of site names (e.g. ['realtor.com', 'zillow'], 'redfin'); defaults to all supported sites
    :param listing_type: Listing type (e.g. 'for_sale', 'for_rent', 'sold')
    :param proxy: Proxy URL to route requests through
    :param keep_duplicates: If True, keep listings found on multiple sites instead of deduplicating by address
    :return: pd.DataFrame containing the scraped properties
    """
    if site_name is None:
        site_name = list(_scrapers.keys())

    if not isinstance(site_name, list):
        site_name = [site_name]

    results = []
    if len(site_name) == 1:
        final_df = _scrape_single_site(location, site_name[0], listing_type, proxy)
        results.append(final_df)
    else:
        # scrape multiple sites concurrently
        with ThreadPoolExecutor() as executor:
            futures = {
                executor.submit(_scrape_single_site, location, s_name, listing_type, proxy): s_name
                for s_name in site_name
            }

            for future in concurrent.futures.as_completed(futures):
                result = future.result()
                results.append(result)

    results = [df for df in results if not df.empty and not df.isna().all().all()]

    if not results:
        return pd.DataFrame()

    final_df = pd.concat(results, ignore_index=True)

    columns_to_track = ["address_one", "address_two", "city"]

    # validate the tracked columns exist, otherwise create them
    for col in columns_to_track:
        if col not in final_df.columns:
            final_df[col] = None

    if not keep_duplicates:
        final_df = final_df.drop_duplicates(subset=columns_to_track, keep="first")

    return final_df
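

# Example usage (a minimal sketch, not part of the library itself; assumes this
# package is importable as `homeharvest` and that the target sites are reachable):
#
#   from homeharvest import scrape_property
#
#   # scrape "for sale" listings from every supported site for one ZIP code
#   df = scrape_property(location="85281")
#
#   # scrape two specific sites and keep cross-site duplicates
#   df = scrape_property(
#       location="San Francisco, CA",
#       site_name=["redfin", "zillow"],
#       listing_type="sold",
#       keep_duplicates=True,
#   )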