# HomeHarvest/homeharvest/__init__.py
import pandas as pd
from typing import Union
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from .core.scrapers import ScraperInput
2023-09-15 15:17:37 -07:00
from .core.scrapers.redfin import RedfinScraper
2023-09-15 20:58:54 -07:00
from .core.scrapers.realtor import RealtorScraper
2023-09-17 13:06:31 -07:00
from .core.scrapers.zillow import ZillowScraper
2023-09-18 11:38:17 -07:00
from .core.scrapers.models import ListingType, Property, SiteName
2023-09-15 15:17:37 -07:00
from .exceptions import InvalidSite, InvalidListingType
# Registry of supported listing sites: lowercase site identifier -> scraper class.
# Site-name lookups elsewhere in this module are done against these keys.
_scrapers = {
    "redfin": RedfinScraper,
    "realtor.com": RealtorScraper,
    "zillow": ZillowScraper,
}
def _validate_input(site_name: str, listing_type: str) -> None:
    """Reject unsupported site names and listing types.

    :param site_name: site identifier, matched case-insensitively against _scrapers
    :param listing_type: listing category, matched case-insensitively against ListingType
    :raises InvalidSite: if the site has no registered scraper
    :raises InvalidListingType: if the listing type is not a ListingType member
    """
    site_is_known = site_name.lower() in _scrapers
    if not site_is_known:
        raise InvalidSite(f"Provided site, '{site_name}', does not exist.")

    type_is_known = listing_type.upper() in ListingType.__members__
    if not type_is_known:
        raise InvalidListingType(
            f"Provided listing type, '{listing_type}', does not exist."
        )
def _get_ordered_properties(result: Property) -> list[str]:
2023-09-18 11:38:17 -07:00
return [
"property_url",
"site_name",
"listing_type",
"property_type",
"status_text",
"currency",
"price",
"apt_min_price",
2023-09-19 09:58:20 -07:00
"apt_max_price",
"apt_min_sqft",
"apt_max_sqft",
2023-09-19 10:27:13 -07:00
"apt_min_beds",
"apt_max_beds",
"apt_min_baths",
"apt_max_baths",
2023-09-18 11:38:17 -07:00
"tax_assessed_value",
"square_feet",
"price_per_sqft",
"beds",
"baths",
"lot_area_value",
"lot_area_unit",
"street_address",
"unit",
"city",
"state",
"zip_code",
"country",
"posted_time",
"bldg_min_beds",
"bldg_min_baths",
"bldg_min_area",
"bldg_unit_count",
"bldg_name",
"stories",
"year_built",
"agent_name",
"mls_id",
"img_src",
"latitude",
"longitude",
2023-09-19 13:01:39 -07:00
"description",
2023-09-18 11:38:17 -07:00
]
2023-09-19 13:43:24 -07:00
def _process_result(result: Property) -> pd.DataFrame:
    """Convert a single Property result into a one-row, column-ordered DataFrame.

    Enum-valued fields (site_name, listing_type, property_type) are normalized
    to plain strings and the nested address object is flattened into top-level
    columns.

    :param result: scraped property record
    :return: one-row DataFrame with columns in the canonical order
    """
    # Copy: result.__dict__ is the live attribute dict, so normalizing in place
    # would silently mutate the Property object itself.
    prop_data = result.__dict__.copy()
    prop_data["site_name"] = prop_data["site_name"].value
    prop_data["listing_type"] = prop_data["listing_type"].value.lower()

    if prop_data.get("property_type") is not None:
        prop_data["property_type"] = prop_data["property_type"].value.lower()
    else:
        prop_data["property_type"] = None

    # Flatten the nested address into top-level columns; pop() also removes the
    # original key and, unlike an unguarded del, tolerates a missing address.
    address_data = prop_data.pop("address", None)
    if address_data is not None:
        prop_data["street_address"] = address_data.street_address
        prop_data["unit"] = address_data.unit
        prop_data["city"] = address_data.city
        prop_data["state"] = address_data.state
        prop_data["zip_code"] = address_data.zip_code
        prop_data["country"] = address_data.country

    properties_df = pd.DataFrame([prop_data])
    # reindex (rather than column selection) fills any absent columns with NaN
    # instead of raising KeyError, e.g. the address columns when no address
    # was present on the record.
    properties_df = properties_df.reindex(columns=_get_ordered_properties(result))

    return properties_df
def _scrape_single_site(
    location: str, site_name: str, listing_type: str, proxy: str = None
) -> pd.DataFrame:
    """
    Scrape one site for the given location/listing type and return the
    results as a single DataFrame (empty if nothing was found).
    """
    _validate_input(site_name, listing_type)

    scraper_input = ScraperInput(
        location=location,
        listing_type=ListingType[listing_type.upper()],
        site_name=SiteName.get_by_value(site_name.lower()),
        proxy=proxy,
    )

    scraper_cls = _scrapers[site_name.lower()]
    results = scraper_cls(scraper_input).search()

    # One single-row frame per result; skip empties and drop all-NaN columns
    # so they don't force NaN columns onto the concatenated output.
    frames = []
    for result in results:
        df = _process_result(result)
        if df.empty:
            continue
        frames.append(df.dropna(axis=1, how="all"))

    if not frames:
        return pd.DataFrame()

    return pd.concat(frames, ignore_index=True)
def scrape_property(
    location: str,
    site_name: Union[str, list[str], None] = None,
    listing_type: str = "for_sale",
    proxy: Union[str, None] = None,
) -> pd.DataFrame:
    """
    Scrape properties from one or more sites for a given location and listing type.

    :param location: US Location (e.g. 'San Francisco, CA', 'Cook County, IL', '85281', '2530 Al Lipscomb Way')
    :param site_name: Site name or list of site names (e.g. ['realtor.com', 'zillow'], 'redfin'); defaults to all supported sites
    :param listing_type: Listing type (e.g. 'for_sale', 'for_rent', 'sold')
    :param proxy: Optional proxy forwarded to each site's scraper
    :return: pd.DataFrame of properties, deduplicated on street_address/city/unit
    """
    if site_name is None:
        site_name = list(_scrapers.keys())

    if not isinstance(site_name, list):
        site_name = [site_name]

    results = []
    if len(site_name) == 1:
        # Single site: scrape inline, no thread pool needed.
        results.append(_scrape_single_site(location, site_name[0], listing_type, proxy))
    else:
        # Scrape sites concurrently; completion order is irrelevant because the
        # frames are concatenated and deduplicated below.
        with ThreadPoolExecutor() as executor:
            futures = {
                executor.submit(
                    _scrape_single_site, location, s_name, listing_type, proxy
                ): s_name
                for s_name in site_name
            }

            for future in concurrent.futures.as_completed(futures):
                results.append(future.result())

    # Discard frames that are empty or entirely NaN before concatenating.
    results = [df for df in results if not df.empty and not df.isna().all().all()]
    if not results:
        return pd.DataFrame()

    final_df = pd.concat(results, ignore_index=True)

    columns_to_track = ["street_address", "city", "unit"]
    #: validate they exist, otherwise create them
    for col in columns_to_track:
        if col not in final_df.columns:
            final_df[col] = None

    # The same listing can be returned by multiple sites; keep the first hit.
    final_df = final_df.drop_duplicates(
        subset=["street_address", "city", "unit"], keep="first"
    )

    return final_df