Merge pull request #11 from ZacharyHampton/proxy_support

Proxy support
Zachary Hampton 2023-09-19 13:50:14 -07:00 committed by GitHub
commit 9aaabdd5d8
6 changed files with 45 additions and 29 deletions

View File

@@ -26,18 +26,19 @@ pip install --force-reinstall homeharvest
### CLI
```bash
homeharvest "San Francisco, CA" --site_name zillow realtor.com redfin --listing_type for_rent --output excel --filename HomeHarvest
homeharvest "San Francisco, CA" -s zillow realtor.com redfin -l for_rent -o excel -f HomeHarvest
```
This will scrape properties from the specified sites for the given location and listing type, and save the results to an Excel file named `HomeHarvest.xlsx`.
By default:
-- If `--site_name` is not provided, it will scrape from all available sites.
-- If `--listing_type` is left blank, the default is `for_sale`, other options are `for_rent` or `sold`.
-- The `--output` default format is `excel`, options are `csv` or `excel`.
-- If `--filename` is left blank, the default is `HomeHarvest_<current_timestamp>`
+- If `-s` or `--site_name` is not provided, it will scrape from all available sites.
+- If `-l` or `--listing_type` is left blank, the default is `for_sale`. Other options are `for_rent` or `sold`.
+- The `-o` or `--output` default format is `excel`. Options are `csv` or `excel`.
+- If `-f` or `--filename` is left blank, the default is `HomeHarvest_<current_timestamp>`.
+- If `-p` or `--proxy` is not provided, the scraper uses the local IP.
### Python
```py
from homeharvest import scrape_property
import pandas as pd
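The new `-p`/`--proxy` flag drops into the same CLI invocation. A quick illustration, where user, pass, host, and port are placeholder values rather than anything from this PR:

```bash
# Scrape Zillow through an authenticated HTTP proxy (placeholder credentials).
homeharvest "San Francisco, CA" -s zillow -l for_sale -p http://user:pass@host:8080
```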
@@ -71,6 +72,7 @@ Required
└── listing_type (enum): for_rent, for_sale, sold
Optional
├── site_name (List[enum], default=all three sites): zillow, realtor.com, redfin
+├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
```
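Putting the documented parameters together, a minimal sketch of a proxied call from Python; the proxy URL is a placeholder in the format above:

```py
from homeharvest import scrape_property

# Placeholder proxy in the documented 'http://user:pass@host:port' format.
properties = scrape_property(
    location="San Francisco, CA",
    site_name=["zillow"],
    listing_type="for_sale",
    proxy="http://user:pass@host:8080",
)
print(properties.head())
```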
### Property Schema

View File

@@ -18,7 +18,7 @@ _scrapers = {
}
-def validate_input(site_name: str, listing_type: str) -> None:
+def _validate_input(site_name: str, listing_type: str) -> None:
if site_name.lower() not in _scrapers:
raise InvalidSite(f"Provided site, '{site_name}', does not exist.")
@@ -28,7 +28,7 @@ def validate_input(site_name: str, listing_type: str) -> None:
)
-def get_ordered_properties(result: Property) -> list[str]:
+def _get_ordered_properties(result: Property) -> list[str]:
return [
"property_url",
"site_name",
@@ -75,7 +75,7 @@ def get_ordered_properties(result: Property) -> list[str]:
]
-def process_result(result: Property) -> pd.DataFrame:
+def _process_result(result: Property) -> pd.DataFrame:
prop_data = result.__dict__
prop_data["site_name"] = prop_data["site_name"].value
@@ -96,29 +96,30 @@ def process_result(result: Property) -> pd.DataFrame:
del prop_data["address"]
properties_df = pd.DataFrame([prop_data])
-    properties_df = properties_df[get_ordered_properties(result)]
+    properties_df = properties_df[_get_ordered_properties(result)]
return properties_df
def _scrape_single_site(
-    location: str, site_name: str, listing_type: str
+    location: str, site_name: str, listing_type: str, proxy: str = None
) -> pd.DataFrame:
"""
Helper function to scrape a single site.
"""
-    validate_input(site_name, listing_type)
+    _validate_input(site_name, listing_type)
scraper_input = ScraperInput(
location=location,
listing_type=ListingType[listing_type.upper()],
site_name=SiteName.get_by_value(site_name.lower()),
+        proxy=proxy,
)
site = _scrapers[site_name.lower()](scraper_input)
results = site.search()
-    properties_dfs = [process_result(result) for result in results]
+    properties_dfs = [_process_result(result) for result in results]
properties_dfs = [
df.dropna(axis=1, how="all") for df in properties_dfs if not df.empty
]
@@ -132,6 +133,7 @@ def scrape_property(
location: str,
site_name: Union[str, list[str]] = None,
listing_type: str = "for_sale",
+    proxy: str = None,
) -> pd.DataFrame:
"""
Scrape property from various sites from a given location and listing type.
@@ -151,13 +153,13 @@ def scrape_property(
results = []
if len(site_name) == 1:
-        final_df = _scrape_single_site(location, site_name[0], listing_type)
+        final_df = _scrape_single_site(location, site_name[0], listing_type, proxy)
results.append(final_df)
else:
with ThreadPoolExecutor() as executor:
futures = {
executor.submit(
-                    _scrape_single_site, location, s_name, listing_type
+                    _scrape_single_site, location, s_name, listing_type, proxy
): s_name
for s_name in site_name
}
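With these changes, `proxy` flows from `scrape_property` into every `_scrape_single_site` task, so all concurrently scraped sites share one proxy. A self-contained, hypothetical sketch of that fan-out pattern; the stub scraper and `scrape_sites` are illustrative, not the library's code:

```py
from concurrent.futures import ThreadPoolExecutor


def _scrape_single_site(location: str, site_name: str, listing_type: str, proxy: str = None) -> str:
    # Stub standing in for the real per-site scraper.
    return f"{site_name}: {listing_type} in {location} via {proxy or 'local IP'}"


def scrape_sites(location: str, site_names: list[str], listing_type: str, proxy: str = None) -> list[str]:
    # One task per site; every task receives the same proxy string.
    with ThreadPoolExecutor() as executor:
        futures = [
            executor.submit(_scrape_single_site, location, s, listing_type, proxy)
            for s in site_names
        ]
        return [future.result() for future in futures]


print(scrape_sites("San Francisco, CA", ["zillow", "redfin"], "for_sale", proxy=None))
```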

View File

@@ -8,36 +8,51 @@ def main():
parser.add_argument(
"location", type=str, help="Location to scrape (e.g., San Francisco, CA)"
)
    parser.add_argument(
+        "-s",
        "--site_name",
        type=str,
        nargs="*",
        default=None,
-        help="Site name(s) to scrape from (e.g., realtor.com zillow)",
+        help="Site name(s) to scrape from (e.g., realtor, zillow)",
    )
parser.add_argument(
"-l",
"--listing_type",
type=str,
default="for_sale",
choices=["for_sale", "for_rent", "sold"],
help="Listing type to scrape",
)
parser.add_argument(
"-o",
"--output",
type=str,
default="excel",
choices=["excel", "csv"],
help="Output format",
)
parser.add_argument(
"-f",
"--filename",
type=str,
default=None,
help="Name of the output file (without extension)",
)
+    parser.add_argument(
+        "-p", "--proxy", type=str, default=None, help="Proxy to use for scraping"
+    )
args = parser.parse_args()
-    result = scrape_property(args.location, args.site_name, args.listing_type)
+    result = scrape_property(
+        args.location, args.site_name, args.listing_type, proxy=args.proxy
+    )
if not args.filename:
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
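The `-p`/`--proxy` option is an ordinary optional string, so an omitted flag falls back to `None` and the scraper uses the local IP. A standalone check of that behavior, with placeholder values:

```py
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "-p", "--proxy", type=str, default=None, help="Proxy to use for scraping"
)

print(parser.parse_args([]).proxy)  # None -> scrape from the local IP
print(parser.parse_args(["-p", "http://user:pass@host:8080"]).proxy)
```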

View File

@@ -8,7 +8,7 @@ class ScraperInput:
location: str
listing_type: ListingType
site_name: SiteName
-    proxy_url: str | None = None
+    proxy: str | None = None
class Scraper:
@@ -16,16 +16,10 @@ class Scraper:
self.location = scraper_input.location
self.listing_type = scraper_input.listing_type
-        self.session = requests.Session()
+        self.session = requests.Session(proxies=scraper_input.proxy)
self.listing_type = scraper_input.listing_type
self.site_name = scraper_input.site_name
-        if scraper_input.proxy_url:
-            self.session.proxies = {
-                "http": scraper_input.proxy_url,
-                "https": scraper_input.proxy_url,
-            }
def search(self) -> list[Property]:
...
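One caveat worth flagging in this hunk: `requests.Session()` accepts no constructor arguments, so `requests.Session(proxies=scraper_input.proxy)` raises a `TypeError` at runtime. In the requests library, proxies are assigned to the session after construction, as a scheme-to-URL mapping rather than a bare string, which is what the removed block did. A corrected sketch, with a placeholder proxy value:

```py
import requests

proxy = "http://user:pass@host:8080"  # placeholder in the documented format

session = requests.Session()
if proxy:
    # requests expects a dict mapping scheme to proxy URL, not a plain string.
    session.proxies = {"http": proxy, "https": proxy}
```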

View File

@ -1,6 +1,5 @@
import re
import json
-import string
from .. import Scraper
from ....utils import parse_address_two, parse_unit
from ....exceptions import GeoCoordsNotFound, NoResultsFound
@@ -32,7 +31,9 @@ class ZillowScraper(Scraper):
return response.json()["results"] != []
def search(self):
-        resp = self.session.get(self.url, headers=self._get_headers())
+        resp = self.session.get(
+            self.url, headers=self._get_headers()
+        )
resp.raise_for_status()
content = resp.text
@@ -129,7 +130,9 @@ class ZillowScraper(Scraper):
"wants": {"cat1": ["mapResults"]},
"isDebugRequest": False,
}
-        resp = self.session.put(url, headers=self._get_headers(), json=payload)
+        resp = self.session.put(
+            url, headers=self._get_headers(), json=payload
+        )
resp.raise_for_status()
a = resp.json()
return self._parse_properties(resp.json())
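A small nit in the PUT path above: `a = resp.json()` is an unused assignment, and the body is then parsed a second time by `resp.json()` in the return statement. A tightened, hypothetical helper showing the same tail with a single parse; `fetch_map_results` is illustrative, not the library's code:

```py
import requests


def fetch_map_results(session: requests.Session, url: str, headers: dict, payload: dict) -> dict:
    # PUT the search payload and parse the response body exactly once.
    resp = session.put(url, headers=headers, json=payload)
    resp.raise_for_status()
    return resp.json()
```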

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "homeharvest"
version = "0.2.3"
version = "0.2.4"
description = "Real estate scraping library supporting Zillow, Realtor.com & Redfin."
authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
homepage = "https://github.com/ZacharyHampton/HomeHarvest"