commit 9aaabdd5d8

README.md

@@ -26,18 +26,19 @@ pip install --force-reinstall homeharvest
 ### CLI
 
 ```bash
-homeharvest "San Francisco, CA" --site_name zillow realtor.com redfin --listing_type for_rent --output excel --filename HomeHarvest
+homeharvest "San Francisco, CA" -s zillow realtor.com redfin -l for_rent -o excel -f HomeHarvest
 ```
 
 This will scrape properties from the specified sites for the given location and listing type, and save the results to an Excel file named `HomeHarvest.xlsx`.
 
 By default:
-- If `--site_name` is not provided, it will scrape from all available sites.
-- If `--listing_type` is left blank, the default is `for_sale`, other options are `for_rent` or `sold`.
-- The `--output` default format is `excel`, options are `csv` or `excel`.
-- If `--filename` is left blank, the default is `HomeHarvest_<current_timestamp>`
+- If `-s` or `--site_name` is not provided, it will scrape from all available sites.
+- If `-l` or `--listing_type` is left blank, the default is `for_sale`. Other options are `for_rent` or `sold`.
+- The `-o` or `--output` default format is `excel`. Options are `csv` or `excel`.
+- If `-f` or `--filename` is left blank, the default is `HomeHarvest_<current_timestamp>`.
+- If `-p` or `--proxy` is not provided, the scraper uses the local IP.
 ### Python
 
 ```py
 from homeharvest import scrape_property
 import pandas as pd
@@ -71,6 +72,7 @@ Required
 └── listing_type (enum): for_rent, for_sale, sold
 Optional
 ├── site_name (List[enum], default=all three sites): zillow, realtor.com, redfin
+├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
 ```
 
 ### Property Schema
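For orientation, a minimal usage sketch of the documented Python API with the new `proxy` argument. The proxy URL below is a placeholder in the `http://user:pass@host:port` format the README describes, not a real endpoint:

```py
from homeharvest import scrape_property

# Placeholder proxy in the documented 'http://user:pass@host:port' format.
properties = scrape_property(
    location="San Francisco, CA",
    site_name=["zillow", "realtor.com", "redfin"],
    listing_type="for_rent",
    proxy="http://user:pass@proxy.example.com:8080",
)
print(properties.head())
```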
@@ -18,7 +18,7 @@ _scrapers = {
 }
 
 
-def validate_input(site_name: str, listing_type: str) -> None:
+def _validate_input(site_name: str, listing_type: str) -> None:
     if site_name.lower() not in _scrapers:
         raise InvalidSite(f"Provided site, '{site_name}', does not exist.")
 

@@ -28,7 +28,7 @@ def validate_input(site_name: str, listing_type: str) -> None:
         )
 
 
-def get_ordered_properties(result: Property) -> list[str]:
+def _get_ordered_properties(result: Property) -> list[str]:
     return [
         "property_url",
         "site_name",

@@ -75,7 +75,7 @@ def get_ordered_properties(result: Property) -> list[str]:
     ]
 
 
-def process_result(result: Property) -> pd.DataFrame:
+def _process_result(result: Property) -> pd.DataFrame:
     prop_data = result.__dict__
 
     prop_data["site_name"] = prop_data["site_name"].value
@@ -96,29 +96,30 @@ def process_result(result: Property) -> pd.DataFrame:
     del prop_data["address"]
 
     properties_df = pd.DataFrame([prop_data])
-    properties_df = properties_df[get_ordered_properties(result)]
+    properties_df = properties_df[_get_ordered_properties(result)]
 
     return properties_df
 
 
 def _scrape_single_site(
-    location: str, site_name: str, listing_type: str
+    location: str, site_name: str, listing_type: str, proxy: str = None
 ) -> pd.DataFrame:
     """
     Helper function to scrape a single site.
     """
-    validate_input(site_name, listing_type)
+    _validate_input(site_name, listing_type)
 
     scraper_input = ScraperInput(
         location=location,
         listing_type=ListingType[listing_type.upper()],
         site_name=SiteName.get_by_value(site_name.lower()),
+        proxy=proxy,
     )
 
     site = _scrapers[site_name.lower()](scraper_input)
     results = site.search()
 
-    properties_dfs = [process_result(result) for result in results]
+    properties_dfs = [_process_result(result) for result in results]
     properties_dfs = [
         df.dropna(axis=1, how="all") for df in properties_dfs if not df.empty
     ]
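As a side note on the `_process_result` pattern above, here is a self-contained sketch of turning one result object's `__dict__` into a one-row DataFrame and reordering its columns. The `Listing` dataclass and column list are hypothetical stand-ins, not the library's `Property` model:

```py
from dataclasses import dataclass

import pandas as pd


@dataclass
class Listing:  # hypothetical stand-in for the library's Property model
    property_url: str
    site_name: str
    price: int


result = Listing("https://example.com/123", "zillow", 850_000)

# One dict becomes one row; selecting columns fixes the output order.
row = pd.DataFrame([result.__dict__])
row = row[["property_url", "site_name", "price"]]
print(row)
```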
@@ -132,6 +133,7 @@ def scrape_property(
     location: str,
     site_name: Union[str, list[str]] = None,
     listing_type: str = "for_sale",
+    proxy: str = None,
 ) -> pd.DataFrame:
     """
     Scrape property from various sites from a given location and listing type.

@@ -151,13 +153,13 @@ def scrape_property(
     results = []
 
     if len(site_name) == 1:
-        final_df = _scrape_single_site(location, site_name[0], listing_type)
+        final_df = _scrape_single_site(location, site_name[0], listing_type, proxy)
         results.append(final_df)
     else:
         with ThreadPoolExecutor() as executor:
             futures = {
                 executor.submit(
-                    _scrape_single_site, location, s_name, listing_type
+                    _scrape_single_site, location, s_name, listing_type, proxy
                 ): s_name
                 for s_name in site_name
             }
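The threading change above simply forwards `proxy` to each submitted task. A standalone sketch of that fan-out pattern, with a hypothetical worker in place of `_scrape_single_site`:

```py
from concurrent.futures import ThreadPoolExecutor, as_completed


def fetch_site(location: str, site: str, listing_type: str, proxy: str | None = None) -> str:
    # Hypothetical worker standing in for _scrape_single_site.
    return f"{site}: {listing_type} listings in {location} via {proxy or 'local IP'}"


def fan_out(location: str, sites: list[str], listing_type: str, proxy: str | None = None) -> list[str]:
    with ThreadPoolExecutor() as executor:
        futures = {
            executor.submit(fetch_site, location, s, listing_type, proxy): s
            for s in sites
        }
        # Gather each site's result as it completes, independent of submission order.
        return [future.result() for future in as_completed(futures)]


print(fan_out("San Francisco, CA", ["zillow", "redfin"], "for_rent"))
```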
@@ -8,36 +8,51 @@ def main():
     parser.add_argument(
         "location", type=str, help="Location to scrape (e.g., San Francisco, CA)"
     )
 
     parser.add_argument(
+        "-s",
         "--site_name",
         type=str,
         nargs="*",
         default=None,
-        help="Site name(s) to scrape from (e.g., realtor.com zillow)",
+        help="Site name(s) to scrape from (e.g., realtor, zillow)",
     )
 
     parser.add_argument(
+        "-l",
         "--listing_type",
         type=str,
         default="for_sale",
         choices=["for_sale", "for_rent", "sold"],
         help="Listing type to scrape",
     )
 
     parser.add_argument(
+        "-o",
         "--output",
         type=str,
         default="excel",
         choices=["excel", "csv"],
         help="Output format",
     )
 
     parser.add_argument(
+        "-f",
         "--filename",
         type=str,
         default=None,
         help="Name of the output file (without extension)",
     )
 
+    parser.add_argument(
+        "-p", "--proxy", type=str, default=None, help="Proxy to use for scraping"
+    )
 
     args = parser.parse_args()
-    result = scrape_property(args.location, args.site_name, args.listing_type)
+
+    result = scrape_property(
+        args.location, args.site_name, args.listing_type, proxy=args.proxy
+    )
 
     if not args.filename:
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
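For reference, a small self-contained sketch of the argparse pattern this hunk introduces: short aliases alongside long flags, and `nargs="*"` for zero or more site names. The parser below is illustrative only, not the project's CLI:

```py
import argparse

parser = argparse.ArgumentParser(description="Illustration of short/long flags with nargs")
parser.add_argument("-s", "--site_name", type=str, nargs="*", default=None,
                    help="Zero or more site names")
parser.add_argument("-p", "--proxy", type=str, default=None, help="Optional proxy URL")

# Short and long spellings populate the same attribute.
print(parser.parse_args(["-s", "zillow", "redfin"]).site_name)  # ['zillow', 'redfin']
print(parser.parse_args(["--site_name", "zillow"]).site_name)   # ['zillow']
print(parser.parse_args([]).site_name)                          # None
```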
@@ -8,7 +8,7 @@ class ScraperInput:
     location: str
     listing_type: ListingType
     site_name: SiteName
-    proxy_url: str | None = None
+    proxy: str | None = None
 
 
 class Scraper:

@@ -16,16 +16,10 @@ class Scraper:
         self.location = scraper_input.location
         self.listing_type = scraper_input.listing_type
 
-        self.session = requests.Session()
+        self.session = requests.Session(proxies=scraper_input.proxy)
         self.listing_type = scraper_input.listing_type
         self.site_name = scraper_input.site_name
 
-        if scraper_input.proxy_url:
-            self.session.proxies = {
-                "http": scraper_input.proxy_url,
-                "https": scraper_input.proxy_url,
-            }
 
     def search(self) -> list[Property]:
         ...
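As background on the session change above, a minimal sketch of routing a `requests` session through a proxy by assigning the session's `proxies` mapping, mirroring the pattern in the lines being removed. The proxy URL is a placeholder:

```py
import requests

session = requests.Session()
# requests expects a scheme-to-URL mapping; both keys can point at the same proxy.
session.proxies = {
    "http": "http://user:pass@proxy.example.com:8080",
    "https": "http://user:pass@proxy.example.com:8080",
}

resp = session.get("https://httpbin.org/ip", timeout=10)
print(resp.json())
```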
@@ -1,6 +1,5 @@
 import re
 import json
-import string
 from .. import Scraper
 from ....utils import parse_address_two, parse_unit
 from ....exceptions import GeoCoordsNotFound, NoResultsFound

@@ -32,7 +31,9 @@ class ZillowScraper(Scraper):
         return response.json()["results"] != []
 
     def search(self):
-        resp = self.session.get(self.url, headers=self._get_headers())
+        resp = self.session.get(
+            self.url, headers=self._get_headers()
+        )
         resp.raise_for_status()
         content = resp.text
 

@@ -129,7 +130,9 @@ class ZillowScraper(Scraper):
             "wants": {"cat1": ["mapResults"]},
             "isDebugRequest": False,
         }
-        resp = self.session.put(url, headers=self._get_headers(), json=payload)
+        resp = self.session.put(
+            url, headers=self._get_headers(), json=payload
+        )
         resp.raise_for_status()
         a = resp.json()
         return self._parse_properties(resp.json())
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "homeharvest"
-version = "0.2.3"
+version = "0.2.4"
 description = "Real estate scraping library supporting Zillow, Realtor.com & Redfin."
 authors = ["Zachary Hampton <zachary@zacharysproducts.com>", "Cullen Watson <cullen@cullen.ai>"]
 homepage = "https://github.com/ZacharyHampton/HomeHarvest"