cullen merge

Zachary Hampton
2023-10-03 22:21:16 -07:00
parent 088088ae51
commit 29664e4eee
10 changed files with 258 additions and 996 deletions


@@ -4,17 +4,14 @@ import concurrent.futures
 from concurrent.futures import ThreadPoolExecutor
 from .core.scrapers import ScraperInput
-from .core.scrapers.redfin import RedfinScraper
+from .utils import process_result, ordered_properties
 from .core.scrapers.realtor import RealtorScraper
-from .core.scrapers.zillow import ZillowScraper
 from .core.scrapers.models import ListingType, Property, SiteName
 from .exceptions import InvalidSite, InvalidListingType

 _scrapers = {
-    "redfin": RedfinScraper,
     "realtor.com": RealtorScraper,
-    "zillow": ZillowScraper,
 }
@@ -26,86 +23,6 @@ def _validate_input(site_name: str, listing_type: str) -> None:
         raise InvalidListingType(f"Provided listing type, '{listing_type}', does not exist.")

-
-def _get_ordered_properties(result: Property) -> list[str]:
-    return [
-        "property_url",
-        "site_name",
-        "listing_type",
-        "property_type",
-        "status_text",
-        "baths_min",
-        "baths_max",
-        "beds_min",
-        "beds_max",
-        "sqft_min",
-        "sqft_max",
-        "price_min",
-        "price_max",
-        "unit_count",
-        "tax_assessed_value",
-        "price_per_sqft",
-        "lot_area_value",
-        "lot_area_unit",
-        "address_one",
-        "address_two",
-        "city",
-        "state",
-        "zip_code",
-        "posted_time",
-        "area_min",
-        "bldg_name",
-        "stories",
-        "year_built",
-        "agent_name",
-        "agent_phone",
-        "agent_email",
-        "days_on_market",
-        "sold_date",
-        "mls_id",
-        "img_src",
-        "latitude",
-        "longitude",
-        "description",
-    ]
-
-
-def _process_result(result: Property) -> pd.DataFrame:
-    prop_data = result.__dict__
-
-    prop_data["site_name"] = prop_data["site_name"].value
-    prop_data["listing_type"] = prop_data["listing_type"].value.lower()
-    if "property_type" in prop_data and prop_data["property_type"] is not None:
-        prop_data["property_type"] = prop_data["property_type"].value.lower()
-    else:
-        prop_data["property_type"] = None
-
-    if "address" in prop_data:
-        address_data = prop_data["address"]
-        prop_data["address_one"] = address_data.address_one
-        prop_data["address_two"] = address_data.address_two
-        prop_data["city"] = address_data.city
-        prop_data["state"] = address_data.state
-        prop_data["zip_code"] = address_data.zip_code
-        del prop_data["address"]
-
-    if "agent" in prop_data and prop_data["agent"] is not None:
-        agent_data = prop_data["agent"]
-        prop_data["agent_name"] = agent_data.name
-        prop_data["agent_phone"] = agent_data.phone
-        prop_data["agent_email"] = agent_data.email
-        del prop_data["agent"]
-    else:
-        prop_data["agent_name"] = None
-        prop_data["agent_phone"] = None
-        prop_data["agent_email"] = None
-
-    properties_df = pd.DataFrame([prop_data])
-    properties_df = properties_df[_get_ordered_properties(result)]
-
-    return properties_df
-
-
 def _scrape_single_site(location: str, site_name: str, listing_type: str, radius: float, proxy: str = None, sold_last_x_days: int = None) -> pd.DataFrame:
     """
     Helper function to scrape a single site.
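
Note: the two helpers deleted above are not gone; per the new import, they now live in homeharvest/utils.py as process_result and ordered_properties. That file is one of the 10 changed in this commit but is not shown in this view, so the following is only a minimal sketch, assuming the moved code mirrors the deleted functions and adopts the renamed address columns that appear later in this diff ("Street", "Unit", "Zip"):

```python
# Hypothetical sketch of homeharvest/utils.py (not shown in this diff).
import pandas as pd

# Assumed to be a plain module-level list now that no Property argument is
# needed; names follow the renamed columns seen further down in this diff.
ordered_properties = [
    "property_url",
    "listing_type",
    "Street",
    "Unit",
    "Zip",
    # ... the rest of the removed _get_ordered_properties list
]


def process_result(result) -> pd.DataFrame:
    """Flatten one Property result into a single-row DataFrame (sketch)."""
    prop_data = result.__dict__
    prop_data["listing_type"] = prop_data["listing_type"].value.lower()
    return pd.DataFrame([prop_data])
```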
@@ -124,22 +41,20 @@ def _scrape_single_site(location: str, site_name: str, listing_type: str, radius
     site = _scrapers[site_name.lower()](scraper_input)
     results = site.search()

-    properties_dfs = [_process_result(result) for result in results]
-    properties_dfs = [df.dropna(axis=1, how="all") for df in properties_dfs if not df.empty]
+    properties_dfs = [process_result(result) for result in results]
     if not properties_dfs:
         return pd.DataFrame()

-    return pd.concat(properties_dfs, ignore_index=True)
+    return pd.concat(properties_dfs, ignore_index=True, axis=0)[ordered_properties]


 def scrape_property(
     location: str,
-    site_name: Union[str, list[str]] = "realtor.com",
+    #: site_name: Union[str, list[str]] = "realtor.com",
     listing_type: str = "for_sale",
     radius: float = None,
     sold_last_x_days: int = None,
     proxy: str = None,
     keep_duplicates: bool = False
 ) -> pd.DataFrame:
     """
     Scrape property from various sites from a given location and listing type.
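
The rewritten return leans on stock pandas behavior: pd.concat(..., axis=0) stacks the per-result rows (axis=0 is already the default, so spelling it out is purely explicit), and indexing the result with a list of names both selects and orders the columns, raising KeyError if any listed column is missing. Note the old dropna(axis=1, how="all") pass is gone, so all-empty columns now survive. A self-contained illustration with stand-in data:

```python
import pandas as pd

# Stand-in for ordered_properties; the names are illustrative only.
ordered_cols = ["Street", "Unit", "Zip"]

df1 = pd.DataFrame([{"Zip": "85001", "Street": "1 Main St", "Unit": None}])
df2 = pd.DataFrame([{"Zip": "85002", "Street": "2 Oak Ave", "Unit": "4B"}])

# Rows stack top-to-bottom; the trailing [ordered_cols] reorders the columns
# to match the list, regardless of each frame's original column order.
combined = pd.concat([df1, df2], ignore_index=True, axis=0)[ordered_cols]
print(combined.columns.tolist())  # ['Street', 'Unit', 'Zip']
```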
@@ -153,6 +68,7 @@ def scrape_property(
     :param listing_type: Listing type (e.g. 'for_sale', 'for_rent', 'sold')
     :returns: pd.DataFrame containing properties
     """
+    site_name = "realtor.com"
     if site_name is None:
         site_name = list(_scrapers.keys())
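
With site_name now hardcoded at the top of the function body (and the parameter commented out of the signature), the if site_name is None: branch below can never fire, and every call returns realtor.com data. A hypothetical call after this change, assuming this file is the package's __init__ (the location value is made up):

```python
from homeharvest import scrape_property

# site_name can no longer be passed; results always come from realtor.com.
properties = scrape_property(
    location="Phoenix, AZ",    # made-up example location
    listing_type="for_sale",   # or "for_rent" / "sold"
)
print(len(properties))
```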
@@ -183,13 +99,11 @@ def scrape_property(
     final_df = pd.concat(results, ignore_index=True)

-    columns_to_track = ["address_one", "address_two", "city"]
+    columns_to_track = ["Street", "Unit", "Zip"]

     #: validate they exist, otherwise create them
     for col in columns_to_track:
         if col not in final_df.columns:
             final_df[col] = None

     if not keep_duplicates:
         final_df = final_df.drop_duplicates(subset=columns_to_track, keep="first")

     return final_df
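
The dedup subset switches from the old address_one/address_two/city triple to the renamed Street/Unit/Zip columns, which are created as all-None when a scraper did not supply them; keep="first" retains the first row per address. A quick demonstration of the drop_duplicates behavior applied when keep_duplicates is left at its default False (the column values are made up):

```python
import pandas as pd

final_df = pd.DataFrame([
    {"Street": "1 Main St", "Unit": None, "Zip": "85001", "list_price": 500_000},
    {"Street": "1 Main St", "Unit": None, "Zip": "85001", "list_price": 510_000},  # same address
    {"Street": "2 Oak Ave", "Unit": "4B", "Zip": "85002", "list_price": 300_000},
])

# keep="first" drops the second row, keeping the 500_000 listing.
deduped = final_df.drop_duplicates(subset=["Street", "Unit", "Zip"], keep="first")
assert len(deduped) == 2
```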