HomeHarvest/homeharvest/__init__.py

108 lines
3.1 KiB
Python
Raw Normal View History

2023-09-15 15:17:37 -07:00
from .core.scrapers.redfin import RedfinScraper
2023-09-15 20:58:54 -07:00
from .core.scrapers.realtor import RealtorScraper
2023-09-17 13:06:31 -07:00
from .core.scrapers.zillow import ZillowScraper
2023-09-17 16:30:37 -07:00
from .core.scrapers.models import ListingType, Property, Building, SiteName
2023-09-15 15:17:37 -07:00
from .core.scrapers import ScraperInput
from .exceptions import InvalidSite, InvalidListingType
2023-09-17 13:06:31 -07:00
from typing import Union
2023-09-17 16:30:37 -07:00
import pandas as pd
2023-09-15 15:17:37 -07:00
# Registry mapping the lowercase site key accepted by scrape_property /
# validate_input to the scraper class that handles that site.
_scrapers = {
    "redfin": RedfinScraper,
    "realtor.com": RealtorScraper,
    "zillow": ZillowScraper,
}
2023-09-17 16:52:34 -07:00
def validate_input(site_name: str, listing_type: str) -> None:
    """Validate that the requested site and listing type are supported.

    Args:
        site_name: Site key; matched case-insensitively against ``_scrapers``.
        listing_type: Listing category; matched case-insensitively against
            ``ListingType`` member names.

    Raises:
        InvalidSite: If ``site_name`` is not a registered scraper key.
        InvalidListingType: If ``listing_type`` is not a ``ListingType`` member.
    """
    normalized_site = site_name.lower()
    if normalized_site not in _scrapers:
        raise InvalidSite(f"Provided site, '{site_name}', does not exist.")

    normalized_listing = listing_type.upper()
    if normalized_listing not in ListingType.__members__:
        raise InvalidListingType(
            f"Provided listing type, '{listing_type}', does not exist."
        )
2023-09-15 15:17:37 -07:00
2023-09-17 16:52:34 -07:00
def get_ordered_properties(result: Union[Building, Property]) -> list[str]:
    """Return the column order for the DataFrame row built from *result*.

    The order depends on the concrete result type; an unrecognized type
    yields an empty list.
    """
    property_columns = [
        "listing_type",
        "address_one",
        "city",
        "state",
        "zip_code",
        "address_two",
        "url",
        "property_type",
        "price",
        "beds",
        "baths",
        "square_feet",
        "price_per_square_foot",
        "lot_size",
        "stories",
        "year_built",
        "agent_name",
        "mls_id",
        "description",
    ]
    building_columns = [
        "address_one",
        "city",
        "state",
        "zip_code",
        "address_two",
        "url",
        "num_units",
        "min_unit_price",
        "max_unit_price",
        "avg_unit_price",
        "listing_type",
    ]
    if isinstance(result, Property):
        return property_columns
    if isinstance(result, Building):
        return building_columns
    return []
def process_result(result: Union[Building, Property]) -> pd.DataFrame:
    """Flatten a scraper result into a single-row, column-ordered DataFrame.

    Args:
        result: A ``Property`` or ``Building`` whose ``address`` attribute is
            expanded into separate address columns.

    Returns:
        A one-row DataFrame whose columns follow ``get_ordered_properties``.
    """
    # Copy the attribute dict: the original code aliased result.__dict__ and
    # then deleted "address" from it, destructively mutating the result object
    # as a side effect of merely formatting it.
    prop_data = result.__dict__.copy()
    address_data = prop_data["address"]
    prop_data["listing_type"] = prop_data["listing_type"].value
    # property_type may be absent (e.g. on buildings); normalize to lowercase.
    prop_data["property_type"] = (
        prop_data["property_type"].value.lower()
        if prop_data.get("property_type")
        else None
    )
    prop_data["address_one"] = address_data.address_one
    prop_data["city"] = address_data.city
    prop_data["state"] = address_data.state
    prop_data["zip_code"] = address_data.zip_code
    prop_data["address_two"] = address_data.address_two
    del prop_data["address"]
    properties_df = pd.DataFrame([prop_data])
    return properties_df[get_ordered_properties(result)]
def scrape_property(
    location: str,
    site_name: str,
    listing_type: str = "for_sale",  #: for_sale, for_rent, sold
) -> pd.DataFrame:
    """Scrape listings for *location* from *site_name* into a DataFrame.

    Args:
        location: Free-form location string passed to the scraper.
        site_name: One of the keys in ``_scrapers`` (case-insensitive).
        listing_type: One of ``ListingType``'s member names (case-insensitive).

    Returns:
        A DataFrame with one row per result; empty if the site returns none.

    Raises:
        InvalidSite: If ``site_name`` is not supported.
        InvalidListingType: If ``listing_type`` is not supported.
    """
    validate_input(site_name, listing_type)

    scraper_input = ScraperInput(
        location=location,
        listing_type=ListingType[listing_type.upper()],
        site_name=site_name.lower(),
    )

    site = _scrapers[site_name.lower()](scraper_input)
    results = site.search()

    properties_dfs = [process_result(result) for result in results]
    if not properties_dfs:
        # pd.concat raises "No objects to concatenate" on an empty list;
        # an empty search should yield an empty frame, not an exception.
        return pd.DataFrame()
    return pd.concat(properties_dfs, ignore_index=True)