Mirror of https://github.com/Bunsly/JobSpy.git
Synced 2026-03-05 03:54:31 -08:00

Compare commits

3 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 2ec3b04777 | |
| | 89a5264391 | |
| | a7ad616567 | |
```diff
@@ -2,12 +2,11 @@ from jobspy import scrape_jobs
 import pandas as pd
 
 jobs: pd.DataFrame = scrape_jobs(
-    site_name=["indeed", "linkedin", "zip_recruiter"],
+    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
     search_term="software engineer",
     location="Dallas, TX",
-    results_wanted=50,  # be wary: the higher it is, the more likely you'll get blocked (a rotating proxy should work, though)
+    results_wanted=25,  # be wary: the higher it is, the more likely you'll get blocked (a rotating proxy can help, though)
     country_indeed="USA",
-    offset=25,  # start jobs from an offset (use if a search failed and you want to continue)
     # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
 )
@@ -28,4 +27,4 @@ print("outputted to jobs.csv")
 # jobs.to_xlsx('jobs.xlsx', index=False)
 
 # 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
-# display(jobs)
+# display(jobs)
```
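Taken together, the first hunk points the demo at four boards instead of three, halves `results_wanted`, and drops the `offset` argument. A minimal way to run the example as the diff leaves it (parameters copied from the hunk; `jobs.csv` is the demo's own output name):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=25,  # a modest count lowers the odds of getting blocked
    country_indeed="USA",
)
print(f"Found {len(jobs)} jobs")  # scrape_jobs returns a pandas DataFrame
jobs.to_csv("jobs.csv", index=False)
```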
examples/JobSpy_LongScrape.py (new file, 77 lines)
```diff
@@ -0,0 +1,77 @@
+from jobspy import scrape_jobs
+import pandas as pd
+import os
+import time
+
+# create a new csv filename if jobs.csv already exists
+csv_filename = "jobs.csv"
+counter = 1
+while os.path.exists(csv_filename):
+    csv_filename = f"jobs_{counter}.csv"
+    counter += 1
+
+# results wanted and offset
+results_wanted = 1000
+offset = 0
+
+all_jobs = []
+
+# max retries
+max_retries = 3
+
+# number of results at each iteration
+results_in_each_iteration = 30
+
+while len(all_jobs) < results_wanted:
+    retry_count = 0
+    while retry_count < max_retries:
+        print("Doing from", offset, "to", offset + results_in_each_iteration, "jobs")
+        try:
+            jobs = scrape_jobs(
+                site_name=["indeed"],
+                search_term="software engineer",
+                # New York, NY
+                # Dallas, TX
+
+                # Los Angeles, CA
+                location="Los Angeles, CA",
+                results_wanted=min(results_in_each_iteration, results_wanted - len(all_jobs)),
+                country_indeed="USA",
+                offset=offset,
+                # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
+            )
+
+            # Add the scraped jobs to the list
+            all_jobs.extend(jobs.to_dict("records"))
+
+            # Increment the offset for the next page of results
+            offset += results_in_each_iteration
+
+            # Add a delay to avoid rate limiting (adjust the delay time as needed)
+            print(f"Scraped {len(all_jobs)} jobs")
+            print("Sleeping secs", 100 * (retry_count + 1))
+            time.sleep(100 * (retry_count + 1))  # sleep between batches to avoid rate limiting
+
+            break  # break out of the retry loop if successful
+        except Exception as e:
+            print(f"Error: {e}")
+            retry_count += 1
+            print("Sleeping secs before retry", 100 * (retry_count + 1))
+            time.sleep(100 * (retry_count + 1))
+            if retry_count >= max_retries:
+                print("Max retries reached. Exiting.")
+                break
+
+# DataFrame from the collected job data
+jobs_df = pd.DataFrame(all_jobs)
+
+# Formatting
+pd.set_option("display.max_columns", None)
+pd.set_option("display.max_rows", None)
+pd.set_option("display.width", None)
+pd.set_option("display.max_colwidth", 50)
+
+print(jobs_df)
+
+jobs_df.to_csv(csv_filename, index=False)
+print(f"Outputted to {csv_filename}")
```
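One caveat in the file above: when `max_retries` is exhausted, the inner `break` only leaves the retry loop, and since `all_jobs` has not grown, the outer `while` re-enters the same offset with a fresh `retry_count`. A minimal sketch of a retry helper that lets the caller stop cleanly instead (names here are illustrative, not part of JobSpy):

```python
import time

def fetch_with_retries(fetch, max_retries: int = 3, base_delay: int = 100):
    """Run `fetch()` until it succeeds or retries run out; None means give up."""
    for attempt in range(max_retries):
        try:
            return fetch()
        except Exception as e:
            print(f"Error: {e}")
            time.sleep(base_delay * (attempt + 1))  # linear backoff, as above
    return None

# Caller side: stop paginating instead of re-entering the same offset.
# page = fetch_with_retries(lambda: scrape_jobs(..., offset=offset))
# if page is None:
#     break
```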
```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.32"
+version = "1.1.34"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"
```
```diff
@@ -92,8 +92,11 @@ class LinkedInScraper(Scraper):
             raise LinkedInException(str(e))
 
         soup = BeautifulSoup(response.text, "html.parser")
+        job_cards = soup.find_all("div", class_="base-search-card")
+        if len(job_cards) == 0:
+            return JobResponse(jobs=job_list)
 
-        for job_card in soup.find_all("div", class_="base-search-card"):
+        for job_card in job_cards:
             job_url = None
             href_tag = job_card.find("a", class_="base-card__full-link")
             if href_tag and "href" in href_tag.attrs:
```
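The LinkedIn hunk queries the result cards once, returns early when the page has none, and reuses the cached list in the loop instead of calling `find_all` twice. The same guard-clause pattern in isolation, as a sketch against generic HTML (the selectors match the diff; the function itself is illustrative, not JobSpy's API):

```python
from bs4 import BeautifulSoup

def parse_cards(html: str) -> list[dict]:
    """Collect one dict per result card; return early when the page is empty."""
    soup = BeautifulSoup(html, "html.parser")
    cards = soup.find_all("div", class_="base-search-card")  # query once, reuse below
    if not cards:
        return []  # an empty page usually means pagination is exhausted
    results = []
    for card in cards:
        link = card.find("a", class_="base-card__full-link")
        if link and "href" in link.attrs:
            results.append({"url": link["href"]})
    return results
```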
```diff
@@ -10,6 +10,7 @@ import re
 from datetime import datetime, date
 from typing import Optional, Tuple, Any
 
+import requests
 from bs4 import BeautifulSoup
 from concurrent.futures import ThreadPoolExecutor
 
@@ -26,6 +27,8 @@ class ZipRecruiterScraper(Scraper):
         """
         site = Site(Site.ZIP_RECRUITER)
         self.url = "https://www.ziprecruiter.com"
+        self.session = create_session(proxy)
+        self.get_cookies()
         super().__init__(site, proxy=proxy)
 
         self.jobs_per_page = 20
@@ -44,12 +47,10 @@ class ZipRecruiterScraper(Scraper):
         if continue_token:
             params["continue"] = continue_token
         try:
-            session = create_session(self.proxy, is_tls=True)
-            response = session.get(
+            response = self.session.get(
                 f"https://api.ziprecruiter.com/jobs-app/jobs",
                 headers=self.headers(),
                 params=self.add_params(scraper_input),
-                timeout_seconds=10,
             )
             if response.status_code != 200:
                 raise ZipRecruiterException(
@@ -156,6 +157,11 @@ class ZipRecruiterScraper(Scraper):
             num_urgent_words=count_urgent_words(description) if description else None,
         )
 
+    def get_cookies(self):
+        url = "https://api.ziprecruiter.com/jobs-app/event"
+        data = "event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
+        self.session.post(url, data=data, headers=ZipRecruiterScraper.headers())
+
     @staticmethod
     def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
         for job_type in JobType:
@@ -195,12 +201,16 @@ class ZipRecruiterScraper(Scraper):
     @staticmethod
     def headers() -> dict:
         """
-        Returns headers needed for ZipRecruiter API requests
+        Returns headers needed for requests
         :return: dict - Dictionary containing headers
         """
         return {
-            'Host': 'api.ziprecruiter.com',
-            'accept': '*/*',
-            'authorization': 'Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==',
-            'Cookie': '__cf_bm=DZ7eJOw6lka.Bwy5jLeDqWanaZ8BJlVAwaXrmcbYnxM-1701505132-0-AfGaVIfTA2kJlmleK14o722vbVwpZ+4UxFznsWv+guvzXSpD9KVEy/+pNzvEZUx88yaEShJwGt3/EVjhHirX/ASustKxg47V/aXRd2XIO2QN; zglobalid=61f94830-1990-4130-b222-d9d0e09c7825.57da9ea9581c.656ae86b; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; zva=100000000%3Bvid%3AZWroa0x_F1KEeGeU'
+            "Host": "api.ziprecruiter.com",
+            "accept": "*/*",
+            "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
+            "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
+            "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
+            "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
+            "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
+            "accept-language": "en-US,en;q=0.9",
         }
```
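The ZipRecruiter hunks replace a per-request TLS session with a single session built in the constructor and primed once via the mobile `/jobs-app/event` endpoint, so later `/jobs-app/jobs` calls share one cookie jar and carry mobile-app headers instead of a hardcoded `Cookie`. A sketch of that priming pattern using plain `requests` (JobSpy's own `create_session` helper wraps proxy handling and is not reproduced here; the payload is trimmed to its first fields for illustration):

```python
import requests

API_BASE = "https://api.ziprecruiter.com"

def make_primed_session(proxy: str | None = None) -> requests.Session:
    """Build one session whose cookie jar is shared by every later API call."""
    session = requests.Session()
    if proxy:
        session.proxies = {"http": proxy, "https": proxy}
    # Prime cookies once, mimicking the mobile app's session event
    # (the full payload and headers appear in the diff above).
    session.post(
        f"{API_BASE}/jobs-app/event",
        data="event_type=session&logged_in=false",
        timeout=10,
    )
    return session

# Later calls reuse the primed cookies, e.g.:
# response = make_primed_session().get(f"{API_BASE}/jobs-app/jobs", timeout=10)
```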