import pandas as pd
from typing import Union
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor

from .core.scrapers import ScraperInput
from .utils import process_result, ordered_properties
from .core.scrapers.realtor import RealtorScraper
from .core.scrapers.models import ListingType, Property, SiteName
from .exceptions import InvalidSite, InvalidListingType


_scrapers = {
    "realtor.com": RealtorScraper,
}
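
# The registry above maps a site name to its scraper class. A hypothetical
# sketch of how another site could be wired in once a scraper class exists
# for it (ZillowScraper is an assumed name, not part of this codebase):
#   _scrapers["zillow"] = ZillowScraper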


def _validate_input(site_name: str, listing_type: str) -> None:
    if site_name.lower() not in _scrapers:
        raise InvalidSite(f"Provided site, '{site_name}', does not exist.")

    if listing_type.upper() not in ListingType.__members__:
        raise InvalidListingType(f"Provided listing type, '{listing_type}', does not exist.")
2023-09-18 20:28:03 -07:00
|
|
|
|
2023-10-03 15:05:17 -07:00
|
|
|
def _scrape_single_site(location: str, site_name: str, listing_type: str, radius: float, proxy: str = None, sold_last_x_days: int = None) -> pd.DataFrame:
|
2023-09-18 08:37:07 -07:00
|
|
|
"""
|
2023-09-18 14:18:22 -07:00
|
|
|
Helper function to scrape a single site.
|
2023-09-18 08:37:07 -07:00
|
|
|
"""
|
2023-09-19 13:43:24 -07:00
|
|
|
_validate_input(site_name, listing_type)
|
2023-09-17 16:52:34 -07:00
|
|
|
|
2023-09-15 15:17:37 -07:00
|
|
|
scraper_input = ScraperInput(
|
|
|
|
location=location,
|
|
|
|
listing_type=ListingType[listing_type.upper()],
|
2023-09-18 13:43:44 -07:00
|
|
|
site_name=SiteName.get_by_value(site_name.lower()),
|
2023-09-19 13:43:24 -07:00
|
|
|
proxy=proxy,
|
2023-10-02 13:58:47 -07:00
|
|
|
radius=radius,
|
2023-10-03 15:05:17 -07:00
|
|
|
sold_last_x_days=sold_last_x_days
|
2023-09-15 15:17:37 -07:00
|
|
|
)
|
|
|
|
|
2023-09-15 15:21:29 -07:00
|
|
|
site = _scrapers[site_name.lower()](scraper_input)
|
2023-09-17 16:30:37 -07:00
|
|
|
results = site.search()
|
2023-09-15 15:17:37 -07:00
|
|
|
|
2023-10-03 22:21:16 -07:00
|
|
|
properties_dfs = [process_result(result) for result in results]
|
2023-09-18 11:38:17 -07:00
|
|
|
if not properties_dfs:
|
|
|
|
return pd.DataFrame()
|
2023-09-17 16:30:37 -07:00
|
|
|
|
2023-10-03 22:21:16 -07:00
|
|
|
return pd.concat(properties_dfs, ignore_index=True, axis=0)[ordered_properties]


def scrape_property(
    location: str,
    #: site_name: Union[str, list[str]] = "realtor.com",
    listing_type: str = "for_sale",
    radius: float = None,
    sold_last_x_days: int = None,
    proxy: str = None,
) -> pd.DataFrame:
    """
    Scrape property listings for a given location and listing type.

    :param location: US Location (e.g. 'San Francisco, CA', 'Cook County, IL', '85281', '2530 Al Lipscomb Way')
    :param site_name: Site name or list of site names (currently fixed to 'realtor.com')
    :param listing_type: Listing type (e.g. 'for_sale', 'for_rent', 'sold')
    :param radius: Radius in miles to find comparable properties for individual addresses
    :param sold_last_x_days: Restrict sold results to those sold within the last x days
    :param proxy: Proxy to route requests through
    :returns: pd.DataFrame containing properties
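
    Example (an illustrative sketch, not a test: it performs a live request
    when run, and assumes the package is importable as `homeharvest`):

        from homeharvest import scrape_property
        df = scrape_property("85281", listing_type="sold", sold_last_x_days=30)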
    """
    # only realtor.com is supported for now, so site_name is fixed here and
    # the parameter above stays commented out until other scrapers are added
    site_name = "realtor.com"

    if site_name is None:
        site_name = list(_scrapers.keys())

    if not isinstance(site_name, list):
        site_name = [site_name]

    results = []

    if len(site_name) == 1:
        final_df = _scrape_single_site(location, site_name[0], listing_type, radius, proxy, sold_last_x_days)
        results.append(final_df)
    else:
        # scrape multiple sites concurrently, one task per site
        with ThreadPoolExecutor() as executor:
            futures = {
                executor.submit(_scrape_single_site, location, s_name, listing_type, radius, proxy, sold_last_x_days): s_name
                for s_name in site_name
            }

            for future in concurrent.futures.as_completed(futures):
                result = future.result()
                results.append(result)

    # drop frames that are empty or entirely NaN before concatenating
    results = [df for df in results if not df.empty and not df.isna().all().all()]

    if not results:
        return pd.DataFrame()

    final_df = pd.concat(results, ignore_index=True)
|
2023-09-18 20:28:03 -07:00
|
|
|
|
2023-10-03 22:21:16 -07:00
|
|
|
columns_to_track = ["Street", "Unit", "Zip"]
|
2023-09-18 20:28:03 -07:00
|
|
|
|
|
|
|
#: validate they exist, otherwise create them
|
|
|
|
for col in columns_to_track:
|
|
|
|
if col not in final_df.columns:
|
|
|
|
final_df[col] = None
|
|
|
|
|
|
|
|
return final_df
|