Compare commits

...

23 Commits

Author SHA1 Message Date
VitaminB16
91b137ef86 feat: Ability to query by time posted for linkedin, indeed, glassdoor, ziprecruiter (#103) 2024-02-09 14:02:03 -06:00
Cullen Watson
2563c5ca08 enh: Indeed company url (#104) 2024-02-09 12:05:10 -06:00
Cullen Watson
32282305c8 docs: readme 2024-02-08 18:13:19 -06:00
Cullen Watson
ccbea51f3c docs: readme 2024-02-04 09:25:10 -06:00
Cullen Watson
6ec7c24f7f enh(linkedin): search by company ids (#99) 2024-02-04 09:21:45 -06:00
Cullen Watson
02caf1b38d fix(zr): date posted (#98) 2024-02-03 07:20:53 -06:00
Cullen Watson
8e2ab277da fix(ziprecruiter): pagination (#97)
* fix(ziprecruiter): pagination

* chore: version
2024-02-02 20:48:28 -06:00
Cullen Watson
ce3bd84ee5 fix: indeed parse description bug (#96)
* fix(indeed): full descr

* chore: version
2024-02-02 18:21:55 -06:00
Cullen Watson
1ccf2290fe docs: readme 2024-02-02 17:59:24 -06:00
Cullen Watson
ec2eefc58a docs: readme 2024-02-02 17:58:15 -06:00
Cullen Watson
13c7694474 Easy apply (#95)
* enh(glassdoor): easy apply filter

* enh(ziprecruiter): easy apply

* enh(indeed): use mobile headers

* chore: version
2024-02-02 17:47:15 -06:00
Cullen Watson
bbe46fe3f4 enh(glassdoor): easy apply filter (#92) 2024-02-01 19:42:24 -06:00
Cullen Watson
b97c73ffd6 fix: clean description (#88) 2024-01-28 21:50:41 -06:00
Cullen Watson
5b3627b244 enh: full description param (#85) 2024-01-22 20:22:32 -06:00
Cullen Watson
2ec3b04777 fix(ziprecruiter): init cookies (#82) 2024-01-12 12:28:35 -06:00
Harish Vadaparty
89a5264391 add long scrape example (#81) 2024-01-12 12:24:00 -06:00
Cullen Watson
a7ad616567 fix: linkedin no results (#80) 2024-01-10 14:01:10 -06:00
cullenwatson
53bc33a43a chore: version 2024-01-09 19:33:56 -06:00
Cullen Watson
22870438c7 linkedin fix delays (#79) 2024-01-09 19:32:51 -06:00
Cullen Watson
aeb93b99f5 Update pyproject.toml 2024-01-03 12:04:50 -06:00
Cullen Watson
a5916edcdd fix(glassdoor): add retry adapter (#77) 2024-01-03 12:04:32 -06:00
Augusto Gunsch
33d442bf1e Add czech to Indeed (#72) 2023-12-02 02:42:54 -06:00
Zachary Hampton
6587e464fa Update README.md 2023-11-30 11:49:31 -07:00
13 changed files with 692 additions and 342 deletions

View File

@@ -5,10 +5,7 @@
 **Not technical?** Try out the web scraping tool on our site at [usejobspy.com](https://usejobspy.com).

 *Looking to build a data-focused software product?* **[Book a call](https://bunsly.com/)** *to
 work with us.*

-Check out another project we wrote: ***[HomeHarvest](https://github.com/Bunsly/HomeHarvest)** a Python package
-for real estate scraping*

 ## Features
@@ -32,18 +29,20 @@ _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/)
 ### Usage

 ```python
+import csv
 from jobspy import scrape_jobs

 jobs = scrape_jobs(
     site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
     search_term="software engineer",
     location="Dallas, TX",
-    results_wanted=10,
+    results_wanted=20,
+    hours_old=72, # (only linkedin is hour specific, others round up to days old)
     country_indeed='USA'  # only needed for indeed / glassdoor
 )
 print(f"Found {len(jobs)} jobs")
 print(jobs.head())
-jobs.to_csv("jobs.csv", index=False) # to_xlsx
+jobs.to_csv("jobs.csv", quoting=csv.QUOTE_NONNUMERIC, escapechar="\\", index=False) # to_xlsx
 ```

 ### Output

@@ -70,10 +69,13 @@ Optional
 ├── job_type (enum): fulltime, parttime, internship, contract
 ├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
 ├── is_remote (bool)
+├── full_description (bool): fetches full description for LinkedIn (slower)
 ├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
-├── easy_apply (bool): filters for jobs that are hosted on LinkedIn
+├── easy_apply (bool): filters for jobs that are hosted on the job board site
+├── linkedin_company_ids (list[int]): searches for linkedin jobs with specific company ids
 ├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
 ├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
+├── hours_old (int): filters jobs by the number of hours since the job was posted (all but LinkedIn rounds up to next day)
 ```
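For reference, a minimal sketch combining the parameters added in this diff (`hours_old`, `linkedin_company_ids`, `full_description`); the values below are illustrative, not recommendations:

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["linkedin"],
    search_term="software engineer",
    hours_old=24,                       # LinkedIn filters by the hour; other boards round up to days
    linkedin_company_ids=[1035, 1441],  # example ids only
    full_description=True,              # slower: fetches the full LinkedIn description
    results_wanted=10,
)
```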
 ### JobPost Schema

@@ -82,6 +84,7 @@ Optional
 JobPost
 ├── title (str)
 ├── company (str)
+├── company_url (str)
 ├── job_url (str)
 ├── location (object)
 │   ├── country (str)
@@ -160,16 +163,11 @@ persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
 **Q: Received a response code 429?**
 **A:** This indicates that you have been blocked by the job board site for sending too many requests. All of the job board sites are aggressive with blocking. We recommend:

-- Waiting a few seconds between requests.
+- Waiting some time between scrapes (site-dependent).
 - Trying a VPN or proxy to change your IP address.
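As a rough sketch of that advice (the interval and proxy below are placeholders, not tuned values):

```python
import time
from jobspy import scrape_jobs

for term in ["software engineer", "data engineer"]:
    jobs = scrape_jobs(
        site_name=["indeed"],
        search_term=term,
        results_wanted=20,
        # proxy="http://user:pass@host:port",  # optionally rotate IPs
    )
    print(term, len(jobs))
    time.sleep(60)  # arbitrary pause between scrapes to stay under rate limits
```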
 ---

-**Q: Experiencing a "Segmentation fault: 11" on macOS Catalina?**
-**A:** This is due to `tls_client` dependency not supporting your architecture. Solutions and workarounds include:
-- Upgrade to a newer version of MacOS
-- Reach out to the maintainers of [tls_client](https://github.com/bogdanfinn/tls-client) for fixes

View File

@@ -2,12 +2,11 @@ from jobspy import scrape_jobs
 import pandas as pd

 jobs: pd.DataFrame = scrape_jobs(
-    site_name=["indeed", "linkedin", "zip_recruiter"],
+    site_name=["indeed", "linkedin", "zip_recruiter", "glassdoor"],
     search_term="software engineer",
     location="Dallas, TX",
-    results_wanted=50,  # be wary the higher it is, the more likely you'll get blocked (rotating proxy should work tho)
+    results_wanted=25,  # be wary the higher it is, the more likely you'll get blocked (rotating proxy can help tho)
     country_indeed="USA",
-    offset=25  # start jobs from an offset (use if search failed and want to continue)
     # proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
 )
@@ -28,4 +27,4 @@ print("outputted to jobs.csv")
 # jobs.to_xlsx('jobs.xlsx', index=False)

 # 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
 # display(jobs)

View File

@@ -0,0 +1,77 @@
from jobspy import scrape_jobs
import pandas as pd
import os
import time
# create a new csv filename if jobs.csv already exists
csv_filename = "jobs.csv"
counter = 1
while os.path.exists(csv_filename):
csv_filename = f"jobs_{counter}.csv"
counter += 1
# results wanted and offset
results_wanted = 1000
offset = 0
all_jobs = []
# max retries
max_retries = 3
# number of results in each iteration
results_in_each_iteration = 30
while len(all_jobs) < results_wanted:
retry_count = 0
while retry_count < max_retries:
print("Doing from", offset, "to", offset + results_in_each_iteration, "jobs")
try:
jobs = scrape_jobs(
site_name=["indeed"],
search_term="software engineer",
# New York, NY
# Dallas, TX
# Los Angeles, CA
location="Los Angeles, CA",
results_wanted=min(results_in_each_iteration, results_wanted - len(all_jobs)),
country_indeed="USA",
offset=offset,
# proxy="http://jobspy:5a4vpWtj8EeJ2hoYzk@ca.smartproxy.com:20001",
)
# Add the scraped jobs to the list
all_jobs.extend(jobs.to_dict('records'))
# Increment the offset for the next page of results
offset += results_in_each_iteration
# Add a delay to avoid rate limiting (you can adjust the delay time as needed)
print(f"Scraped {len(all_jobs)} jobs")
print("Sleeping secs", 100 * (retry_count + 1))
            time.sleep(100 * (retry_count + 1))  # sleep between requests to avoid rate limiting
break # Break out of the retry loop if successful
except Exception as e:
print(f"Error: {e}")
retry_count += 1
print("Sleeping secs before retry", 100 * (retry_count + 1))
time.sleep(100 * (retry_count + 1))
if retry_count >= max_retries:
print("Max retries reached. Exiting.")
break
# DataFrame from the collected job data
jobs_df = pd.DataFrame(all_jobs)
# Formatting
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", 50)
print(jobs_df)
jobs_df.to_csv(csv_filename, index=False)
print(f"Outputted to {csv_filename}")

poetry.lock generated
View File

@@ -1053,16 +1053,6 @@ files = [
     {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
     {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
     {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
@@ -2270,13 +2260,13 @@ test = ["flake8", "isort", "pytest"]
 [[package]]
 name = "tls-client"
-version = "0.2.1"
+version = "1.0"
 description = "Advanced Python HTTP Client."
 optional = false
 python-versions = "*"
 files = [
-    {file = "tls_client-0.2.1-py3-none-any.whl", hash = "sha256:124a710952b979d5e20b4e2b7879b7958d6e48a259d0f5b83101055eb173f0bd"},
-    {file = "tls_client-0.2.1.tar.gz", hash = "sha256:473fb4c671d9d4ca6b818548ab6e955640dd589767bfce520830c5618c2f2e2b"},
+    {file = "tls_client-1.0-py3-none-any.whl", hash = "sha256:f1183f5e18cb31914bd62d11b350a33ea0293ea80fb91d69a3072821dece3e66"},
+    {file = "tls_client-1.0.tar.gz", hash = "sha256:7f6de48ad4a0ef69b72682c76ce604155971e07b4bfb2148a36276194ae3e7a0"},
 ]

 [[package]]
@@ -2445,4 +2435,4 @@ files = [
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "f966f3979873eec2c3b13460067f5aa414c69aa8ab5cd3239c1cfa564fcb5deb"
+content-hash = "404a77d78066cbb2ef71015562baf44aa11d12aac29a191c1ccc7758bfda598a"

View File

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.29"
+version = "1.1.43"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"
@@ -13,7 +13,7 @@ packages = [
 [tool.poetry.dependencies]
 python = "^3.10"
 requests = "^2.31.0"
-tls-client = "^0.2.1"
+tls-client = "*"
 beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
 NUMPY = "1.24.2"

View File

@@ -1,7 +1,6 @@
 import pandas as pd
-import concurrent.futures
-from concurrent.futures import ThreadPoolExecutor
-from typing import Tuple, Optional
+from typing import Tuple
+from concurrent.futures import ThreadPoolExecutor, as_completed

 from .jobs import JobType, Location
 from .scrapers.indeed import IndeedScraper
@@ -29,18 +28,22 @@ def _map_str_to_site(site_name: str) -> Site:
 def scrape_jobs(
-    site_name: str | list[str] | Site | list[Site],
-    search_term: str,
-    location: str = "",
-    distance: int = None,
+    site_name: str | list[str] | Site | list[Site] | None = None,
+    search_term: str | None = None,
+    location: str | None = None,
+    distance: int | None = None,
     is_remote: bool = False,
-    job_type: str = None,
-    easy_apply: bool = False,  # linkedin
+    job_type: str | None = None,
+    easy_apply: bool | None = None,
     results_wanted: int = 15,
     country_indeed: str = "usa",
     hyperlinks: bool = False,
-    proxy: Optional[str] = None,
-    offset: Optional[int] = 0,
+    proxy: str | None = None,
+    full_description: bool | None = False,
+    linkedin_company_ids: list[int] | None = None,
+    offset: int | None = 0,
+    hours_old: int = None,
+    **kwargs,
 ) -> pd.DataFrame:
     """
     Simultaneously scrapes job data from multiple job sites.
@@ -55,18 +58,23 @@ def scrape_jobs(
     job_type = get_enum_from_value(job_type) if job_type else None

-    if type(site_name) == str:
-        site_type = [_map_str_to_site(site_name)]
-    else:  #: if type(site_name) == list
-        site_type = [
-            _map_str_to_site(site) if type(site) == str else site_name
-            for site in site_name
-        ]
+    def get_site_type():
+        site_types = list(Site)
+        if isinstance(site_name, str):
+            site_types = [_map_str_to_site(site_name)]
+        elif isinstance(site_name, Site):
+            site_types = [site_name]
+        elif isinstance(site_name, list):
+            site_types = [
+                _map_str_to_site(site) if isinstance(site, str) else site
+                for site in site_name
+            ]
+        return site_types

     country_enum = Country.from_string(country_indeed)

     scraper_input = ScraperInput(
-        site_type=site_type,
+        site_type=get_site_type(),
         country=country_enum,
         search_term=search_term,
         location=location,
@@ -74,8 +82,11 @@
         is_remote=is_remote,
         job_type=job_type,
         easy_apply=easy_apply,
+        full_description=full_description,
         results_wanted=results_wanted,
+        linkedin_company_ids=linkedin_company_ids,
         offset=offset,
+        hours_old=hours_old
     )

     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
@@ -110,7 +121,7 @@
             executor.submit(worker, site): site for site in scraper_input.site_type
         }

-        for future in concurrent.futures.as_completed(future_to_site):
+        for future in as_completed(future_to_site):
             site_value, scraped_data = future.result()
             site_to_jobs_dict[site_value] = scraped_data
@@ -181,4 +192,4 @@
     else:
         jobs_formatted_df = pd.DataFrame()

-    return jobs_formatted_df
+    return jobs_formatted_df.sort_values(by='date_posted', ascending=False)
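A quick illustration of the looser signature above: `site_name` is now optional and accepts a string, a `Site` enum, or a mixed list, defaulting to all sites when omitted. A hedged sketch (behavior inferred from `get_site_type` in this diff):

```python
from jobspy import scrape_jobs

# each of these should now be accepted by get_site_type
df_one = scrape_jobs(site_name="indeed", search_term="devops engineer")
df_many = scrape_jobs(site_name=["indeed", "glassdoor"], search_term="devops engineer")
df_all = scrape_jobs(search_term="devops engineer")  # site_name=None -> list(Site)

# per the final change above, results come back sorted newest-first
print(df_many["date_posted"].head())
```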

View File

@@ -1,7 +1,7 @@
-from typing import Union, Optional
+from typing import Optional
 from datetime import date
 from enum import Enum

-from pydantic import BaseModel, validator
+from pydantic import BaseModel


 class JobType(Enum):
@@ -55,18 +55,24 @@
 class Country(Enum):
-    ARGENTINA = ("argentina", "com.ar")
+    """
+    Gets the subdomain for Indeed and Glassdoor.
+    The second item in the tuple is the subdomain for Indeed
+    The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
+    """
+
+    ARGENTINA = ("argentina", "ar", "com.ar")
     AUSTRALIA = ("australia", "au", "com.au")
     AUSTRIA = ("austria", "at", "at")
     BAHRAIN = ("bahrain", "bh")
-    BELGIUM = ("belgium", "be", "nl:be")
+    BELGIUM = ("belgium", "be", "fr:be")
     BRAZIL = ("brazil", "br", "com.br")
     CANADA = ("canada", "ca", "ca")
     CHILE = ("chile", "cl")
     CHINA = ("china", "cn")
     COLOMBIA = ("colombia", "co")
     COSTARICA = ("costa rica", "cr")
-    CZECHREPUBLIC = ("czech republic", "cz")
+    CZECHREPUBLIC = ("czech republic,czechia", "cz")
     DENMARK = ("denmark", "dk")
     ECUADOR = ("ecuador", "ec")
     EGYPT = ("egypt", "eg")
@@ -112,8 +118,8 @@
     TURKEY = ("turkey", "tr")
     UKRAINE = ("ukraine", "ua")
     UNITEDARABEMIRATES = ("united arab emirates", "ae")
-    UK = ("uk", "uk", "co.uk")
-    USA = ("usa", "www", "com")
+    UK = ("uk,united kingdom", "uk", "co.uk")
+    USA = ("usa,us,united states", "www", "com")
     URUGUAY = ("uruguay", "uy")
     VENEZUELA = ("venezuela", "ve")
     VIETNAM = ("vietnam", "vn")
@@ -147,7 +153,8 @@
         """Convert a string to the corresponding Country enum."""
         country_str = country_str.strip().lower()
         for country in cls:
-            if country.value[0] == country_str:
+            country_names = country.value[0].split(',')
+            if country_str in country_names:
                 return country
         valid_countries = [country.value for country in cls]
         raise ValueError(
@@ -167,10 +174,13 @@
         if self.state:
             location_parts.append(self.state)
         if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
-            if self.country.value[0] in ("usa", "uk"):
-                location_parts.append(self.country.value[0].upper())
+            country_name = self.country.value[0]
+            if "," in country_name:
+                country_name = country_name.split(",")[0]
+            if country_name in ("usa", "uk"):
+                location_parts.append(country_name.upper())
             else:
-                location_parts.append(self.country.value[0].title())
+                location_parts.append(country_name.title())
         return ", ".join(location_parts)
@@ -181,11 +191,22 @@
     DAILY = "daily"
     HOURLY = "hourly"

+    @classmethod
+    def get_interval(cls, pay_period):
+        interval_mapping = {
+            "YEAR": cls.YEARLY,
+            "HOUR": cls.HOURLY,
+        }
+        if pay_period in interval_mapping:
+            return interval_mapping[pay_period].value
+        else:
+            return cls[pay_period].value if pay_period in cls.__members__ else None
+

 class Compensation(BaseModel):
     interval: Optional[CompensationInterval] = None
-    min_amount: int | None = None
-    max_amount: int | None = None
+    min_amount: float | None = None
+    max_amount: float | None = None
     currency: Optional[str] = "USD"
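To make the alias handling and interval mapping above concrete, a hedged sketch (the `jobspy.jobs` import path is assumed from the module docstrings in this diff):

```python
from jobspy.jobs import Country, CompensationInterval  # path assumed

# comma-separated aliases now all resolve to the same member
assert Country.from_string("united states") is Country.USA
assert Country.from_string("czechia") is Country.CZECHREPUBLIC

# get_interval covers both the explicit mapping and the __members__ fallback
assert CompensationInterval.get_interval("YEAR") == "yearly"
assert CompensationInterval.get_interval("MONTHLY") == "monthly"
assert CompensationInterval.get_interval("FORTNIGHT") is None
```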

View File

@@ -1,5 +1,4 @@
from ..jobs import Enum, BaseModel, JobType, JobResponse, Country from ..jobs import Enum, BaseModel, JobType, JobResponse, Country
from typing import List, Optional, Any
class Site(Enum): class Site(Enum):
@@ -10,24 +9,26 @@
 class ScraperInput(BaseModel):
-    site_type: List[Site]
-    search_term: str
+    site_type: list[Site]
+    search_term: str | None = None

-    location: str = None
-    country: Optional[Country] = Country.USA
-    distance: Optional[int] = None
+    location: str | None = None
+    country: Country | None = Country.USA
+    distance: int | None = None
     is_remote: bool = False
-    job_type: Optional[JobType] = None
-    easy_apply: bool = None  # linkedin
+    job_type: JobType | None = None
+    easy_apply: bool | None = None
+    full_description: bool = False
     offset: int = 0
+    linkedin_company_ids: list[int] | None = None
     results_wanted: int = 15
+    hours_old: int | None = None


 class Scraper:
-    def __init__(self, site: Site, proxy: Optional[List[str]] = None):
+    def __init__(self, site: Site, proxy: list[str] | None = None):
         self.site = site
         self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)

-    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
-        ...
+    def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...

View File

@@ -4,17 +4,17 @@ jobspy.scrapers.glassdoor
 This module contains routines to scrape Glassdoor.
 """
-import math
-import time
-import re
 import json
-from datetime import datetime, date
-from typing import Optional, Tuple, Any
+import requests

 from bs4 import BeautifulSoup
+from typing import Optional
+from datetime import datetime, timedelta
+from concurrent.futures import ThreadPoolExecutor, as_completed

+from ..utils import count_urgent_words, extract_emails_from_text
 from .. import Scraper, ScraperInput, Site
 from ..exceptions import GlassdoorException
-from ..utils import count_urgent_words, extract_emails_from_text, create_session
+from ..utils import create_session, modify_and_get_description
 from ...jobs import (
     JobPost,
     Compensation,
@@ -22,7 +22,6 @@ from ...jobs import (
     Location,
     JobResponse,
     JobType,
-    Country,
 )
@@ -31,7 +30,7 @@ class GlassdoorScraper(Scraper):
         """
         Initializes GlassdoorScraper with the Glassdoor job search url
         """
-        site = Site(Site.ZIP_RECRUITER)
+        site = Site(Site.GLASSDOOR)
         super().__init__(site, proxy=proxy)

         self.url = None
@@ -49,15 +48,12 @@
     ) -> (list[JobPost], str | None):
         """
         Scrapes a page of Glassdoor for jobs with scraper_input criteria
-        :param scraper_input:
-        :return: jobs found on page
-        :return: cursor for next page
         """
         try:
             payload = self.add_payload(
                 scraper_input, location_id, location_type, page_num, cursor
             )
-            session = create_session(self.proxy, is_tls=False)
+            session = create_session(self.proxy, is_tls=False, has_retry=True)
             response = session.post(
                 f"{self.url}/graph", headers=self.headers(), timeout=10, data=payload
             )
@@ -74,48 +70,72 @@
         jobs_data = res_json["data"]["jobListings"]["jobListings"]

         jobs = []
-        for i, job in enumerate(jobs_data):
-            job_url = res_json["data"]["jobListings"]["jobListingSeoLinks"][
-                "linkItems"
-            ][i]["url"]
-            if job_url in self.seen_urls:
-                continue
-            self.seen_urls.add(job_url)
-            job = job["jobview"]
-            title = job["job"]["jobTitleText"]
-            company_name = job["header"]["employerNameFromSearch"]
-            location_name = job["header"].get("locationName", "")
-            location_type = job["header"].get("locationType", "")
-            is_remote = False
-            location = None
-            if location_type == "S":
-                is_remote = True
-            else:
-                location = self.parse_location(location_name)
-            compensation = self.parse_compensation(job["header"])
-            job = JobPost(
-                title=title,
-                company_name=company_name,
-                job_url=job_url,
-                location=location,
-                compensation=compensation,
-                is_remote=is_remote,
-            )
-            jobs.append(job)
+        with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
+            future_to_job_data = {executor.submit(self.process_job, job): job for job in jobs_data}
+            for future in as_completed(future_to_job_data):
+                job_data = future_to_job_data[future]
+                try:
+                    job_post = future.result()
+                    if job_post:
+                        jobs.append(job_post)
+                except Exception as exc:
+                    raise GlassdoorException(f'Glassdoor generated an exception: {exc}')

         return jobs, self.get_cursor_for_page(
             res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
         )

+    def process_job(self, job_data):
+        """Processes a single job and fetches its description."""
+        job_id = job_data["jobview"]["job"]["listingId"]
+        job_url = f'{self.url}job-listing/j?jl={job_id}'
+        if job_url in self.seen_urls:
+            return None
+        self.seen_urls.add(job_url)
+        job = job_data["jobview"]
+        title = job["job"]["jobTitleText"]
+        company_name = job["header"]["employerNameFromSearch"]
+        company_id = job_data['jobview']['header']['employer']['id']
+        location_name = job["header"].get("locationName", "")
+        location_type = job["header"].get("locationType", "")
+        age_in_days = job["header"].get("ageInDays")
+        is_remote, location = False, None
+        date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days is not None else None
+        if location_type == "S":
+            is_remote = True
+        else:
+            location = self.parse_location(location_name)
+        compensation = self.parse_compensation(job["header"])
+        try:
+            description = self.fetch_job_description(job_id)
+        except Exception as e:
+            description = None
+        job_post = JobPost(
+            title=title,
+            company_url=f"{self.url}Overview/W-EI_IE{company_id}.htm" if company_id else None,
+            company_name=company_name,
+            date_posted=date_posted,
+            job_url=job_url,
+            location=location,
+            compensation=compensation,
+            is_remote=is_remote,
+            description=description,
+            emails=extract_emails_from_text(description) if description else None,
+            num_urgent_words=count_urgent_words(description) if description else None,
+        )
+        return job_post
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
         Scrapes Glassdoor for jobs with scraper_input criteria.
         :param scraper_input: Information about job search criteria.
         :return: JobResponse containing a list of jobs.
         """
+        scraper_input.results_wanted = min(900, scraper_input.results_wanted)
         self.country = scraper_input.country
         self.url = self.country.get_url()
@@ -149,6 +169,41 @@
         return JobResponse(jobs=all_jobs)

+    def fetch_job_description(self, job_id):
+        """Fetches the job description for a single job ID."""
+        url = f"{self.url}/graph"
+        body = [
+            {
+                "operationName": "JobDetailQuery",
+                "variables": {
+                    "jl": job_id,
+                    "queryString": "q",
+                    "pageTypeEnum": "SERP"
+                },
+                "query": """
+                query JobDetailQuery($jl: Long!, $queryString: String, $pageTypeEnum: PageTypeEnum) {
+                    jobview: jobView(
+                        listingId: $jl
+                        contextHolder: {queryString: $queryString, pageTypeEnum: $pageTypeEnum}
+                    ) {
+                        job {
+                            description
+                            __typename
+                        }
+                        __typename
+                    }
+                }
+                """
+            }
+        ]
+        response = requests.post(url, json=body, headers=GlassdoorScraper.headers())
+        if response.status_code != 200:
+            return None
+        data = response.json()[0]
+        desc = data['data']['jobview']['job']['description']
+        soup = BeautifulSoup(desc, 'html.parser')
+        return modify_and_get_description(soup)

     @staticmethod
     def parse_compensation(data: dict) -> Optional[Compensation]:
         pay_period = data.get("payPeriod")
@@ -161,15 +216,8 @@
         interval = None
         if pay_period == "ANNUAL":
             interval = CompensationInterval.YEARLY
-        elif pay_period == "MONTHLY":
-            interval = CompensationInterval.MONTHLY
-        elif pay_period == "WEEKLY":
-            interval = CompensationInterval.WEEKLY
-        elif pay_period == "DAILY":
-            interval = CompensationInterval.DAILY
-        elif pay_period == "HOURLY":
-            interval = CompensationInterval.HOURLY
+        elif pay_period:
+            interval = CompensationInterval.get_interval(pay_period)

         min_amount = int(adjusted_pay.get("p10") // 1)
         max_amount = int(adjusted_pay.get("p90") // 1)
@@ -180,17 +228,11 @@
             currency=currency,
         )

-    def get_job_type_enum(self, job_type_str: str) -> list[JobType] | None:
-        for job_type in JobType:
-            if job_type_str in job_type.value:
-                return [job_type]
-        return None
-
     def get_location(self, location: str, is_remote: bool) -> (int, str):
         if not location or is_remote:
             return "11047", "STATE"  # remote options
         url = f"{self.url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
-        session = create_session(self.proxy)
+        session = create_session(self.proxy, has_retry=True)
         response = session.get(url)
         if response.status_code != 200:
             raise GlassdoorException(
@@ -204,6 +246,8 @@
             location_type = "CITY"
         elif location_type == "S":
             location_type = "STATE"
+        elif location_type == 'N':
+            location_type = "COUNTRY"
         return int(items[0]["locationId"]), location_type

     @staticmethod
@@ -213,12 +257,20 @@
         location_type: str,
         page_num: int,
         cursor: str | None = None,
-    ) -> dict[str, str | Any]:
+    ) -> str:
+        # `fromage` is the posting time filter in days
+        fromage = max(scraper_input.hours_old // 24, 1) if scraper_input.hours_old else None
+        filter_params = []
+        if scraper_input.easy_apply:
+            filter_params.append({"filterKey": "applicationType", "values": "1"})
+        if fromage:
+            filter_params.append({"filterKey": "fromAge", "values": str(fromage)})
         payload = {
             "operationName": "JobSearchResultsQuery",
             "variables": {
                 "excludeJobListingIds": [],
-                "filterParams": [],
+                "filterParams": filter_params,
                 "keyword": scraper_input.search_term,
                 "numJobsToShow": 30,
                 "locationType": location_type,
@@ -226,6 +278,8 @@
                 "parameterUrlInput": f"IL.0,12_I{location_type}{location_id}",
                 "pageNumber": page_num,
                 "pageCursor": cursor,
+                "fromage": fromage,
+                "sort": "date"
             },
             "query": "query JobSearchResultsQuery($excludeJobListingIds: [Long!], $keyword: String, $locationId: Int, $locationType: LocationTypeEnum, $numJobsToShow: Int!, $pageCursor: String, $pageNumber: Int, $filterParams: [FilterParams], $originalPageUrl: String, $seoFriendlyUrlInput: String, $parameterUrlInput: String, $seoUrl: Boolean) {\n jobListings(\n contextHolder: {searchParams: {excludeJobListingIds: $excludeJobListingIds, keyword: $keyword, locationId: $locationId, locationType: $locationType, numPerPage: $numJobsToShow, pageCursor: $pageCursor, pageNumber: $pageNumber, filterParams: $filterParams, originalPageUrl: $originalPageUrl, seoFriendlyUrlInput: $seoFriendlyUrlInput, parameterUrlInput: $parameterUrlInput, seoUrl: $seoUrl, searchType: SR}}\n ) {\n companyFilterOptions {\n id\n shortName\n __typename\n }\n filterOptions\n indeedCtk\n jobListings {\n ...JobView\n __typename\n }\n jobListingSeoLinks {\n linkItems {\n position\n url\n __typename\n }\n __typename\n }\n jobSearchTrackingKey\n jobsPageSeoData {\n pageMetaDescription\n pageTitle\n __typename\n }\n paginationCursors {\n cursor\n pageNumber\n __typename\n }\n indexablePageForSeo\n searchResultsMetadata {\n searchCriteria {\n implicitLocation {\n id\n localizedDisplayName\n type\n __typename\n }\n keyword\n location {\n id\n shortName\n localizedShortName\n localizedDisplayName\n type\n __typename\n }\n __typename\n }\n footerVO {\n countryMenu {\n childNavigationLinks {\n id\n link\n textKey\n __typename\n }\n __typename\n }\n __typename\n }\n helpCenterDomain\n helpCenterLocale\n jobAlert {\n jobAlertExists\n __typename\n }\n jobSerpFaq {\n questions {\n answer\n question\n __typename\n }\n __typename\n }\n jobSerpJobOutlook {\n occupation\n paragraph\n __typename\n }\n showMachineReadableJobs\n __typename\n }\n serpSeoLinksVO {\n relatedJobTitlesResults\n searchedJobTitle\n searchedKeyword\n searchedLocationIdAsString\n searchedLocationSeoName\n searchedLocationType\n topCityIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerIdsToNameResults {\n key\n value\n __typename\n }\n topEmployerNameResults\n topOccupationResults\n __typename\n }\n totalJobsCount\n __typename\n }\n}\n\nfragment JobView on JobListingSearchResult {\n jobview {\n header {\n adOrderId\n advertiserType\n adOrderSponsorshipLevel\n ageInDays\n divisionEmployerName\n easyApply\n employer {\n id\n name\n shortName\n __typename\n }\n employerNameFromSearch\n goc\n gocConfidence\n gocId\n jobCountryId\n jobLink\n jobResultTrackingKey\n jobTitleText\n locationName\n locationType\n locId\n needsCommission\n payCurrency\n payPeriod\n payPeriodAdjustedPay {\n p10\n p50\n p90\n __typename\n }\n rating\n salarySource\n savedJobId\n sponsored\n __typename\n }\n job {\n descriptionFragments\n importConfigId\n jobTitleId\n jobTitleText\n listingId\n __typename\n }\n jobListingAdminDetails {\n cpcVal\n importConfigId\n jobListingId\n jobSourceId\n userEligibleForAdminJobDetails\n __typename\n }\n overview {\n shortName\n squareLogoUrl\n __typename\n }\n __typename\n }\n __typename\n}\n",
         }
@@ -243,12 +297,18 @@
             payload["variables"]["filterParams"].append(
                 {"filterKey": "jobType", "values": filter_value}
             )
         return json.dumps([payload])

-    def parse_location(self, location_name: str) -> Location:
+    @staticmethod
+    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
+        for job_type in JobType:
+            if job_type_str in job_type.value:
+                return [job_type]
+
+    @staticmethod
+    def parse_location(location_name: str) -> Location | None:
         if not location_name or location_name == "Remote":
-            return None
+            return
         city, _, state = location_name.partition(", ")
         return Location(city=city, state=state)
@@ -257,7 +317,6 @@
         for cursor_data in pagination_cursors:
             if cursor_data["pageNumber"] == page_num:
                 return cursor_data["cursor"]
-        return None

     @staticmethod
     def headers() -> dict:
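To make the `hours_old` → `fromage` conversion used in `add_payload` above concrete, a small standalone sketch of the arithmetic (values illustrative):

```python
# posting-age filter: hours are floored to days, with a minimum of one day
def to_fromage(hours_old):
    return max(hours_old // 24, 1) if hours_old else None

assert to_fromage(72) == 3       # 72 hours -> jobs posted within 3 days
assert to_fromage(12) == 1       # under a day still rounds up to a 1-day filter
assert to_fromage(None) is None  # no filter requested
```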

View File

@@ -6,8 +6,9 @@ This module contains routines to scrape Indeed.
 """
 import re
 import math
-import io
 import json
+import requests
+from typing import Any
 from datetime import datetime
 import urllib.parse
@@ -21,6 +22,7 @@ from ..utils import (
     extract_emails_from_text,
     create_session,
     get_enum_from_job_type,
+    modify_and_get_description
 )
 from ...jobs import (
     JobPost,
@@ -43,7 +45,7 @@
         site = Site(Site.INDEED)
         super().__init__(site, proxy=proxy)

-        self.jobs_per_page = 15
+        self.jobs_per_page = 25
         self.seen_urls = set()

     def scrape_page(
@@ -59,29 +61,12 @@
         domain = self.country.indeed_domain_value
         self.url = f"https://{domain}.indeed.com"

-        params = {
-            "q": scraper_input.search_term,
-            "l": scraper_input.location,
-            "filter": 0,
-            "start": scraper_input.offset + page * 10,
-        }
-        if scraper_input.distance:
-            params["radius"] = scraper_input.distance
-
-        sc_values = []
-        if scraper_input.is_remote:
-            sc_values.append("attr(DSQF7)")
-        if scraper_input.job_type:
-            sc_values.append("jt({})".format(scraper_input.job_type.value))
-
-        if sc_values:
-            params["sc"] = "0kf:" + "".join(sc_values) + ";"
         try:
-            session = create_session(self.proxy, is_tls=True)
+            session = create_session(self.proxy)
             response = session.get(
-                f"{self.url}/jobs",
+                f"{self.url}/m/jobs",
                 headers=self.get_headers(),
-                params=params,
+                params=self.add_params(scraper_input, page),
                 allow_redirects=True,
                 timeout_seconds=10,
             )
@@ -95,13 +80,14 @@
             raise IndeedException(str(e))

         soup = BeautifulSoup(response.content, "html.parser")
+        job_list = []
+        total_num_jobs = IndeedScraper.total_jobs(soup)
         if "did not match any jobs" in response.text:
-            raise IndeedException("Parsing exception: Search did not match any jobs")
+            return job_list, total_num_jobs

         jobs = IndeedScraper.parse_jobs(
             soup
         )  #: can raise exception, handled by main scrape function
-        total_num_jobs = IndeedScraper.total_jobs(soup)

         if (
             not jobs.get("metaData", {})
@@ -110,67 +96,51 @@
         ):
             raise IndeedException("No jobs found.")

-        def process_job(job) -> JobPost | None:
-            job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}'
+        def process_job(job: dict, job_detailed: dict) -> JobPost | None:
+            job_url = f'{self.url}/m/jobs/viewjob?jk={job["jobkey"]}'
             job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
             if job_url in self.seen_urls:
                 return None
+            self.seen_urls.add(job_url)
+            description = job_detailed['description']['html']

-            extracted_salary = job.get("extractedSalary")
-            compensation = None
-            if extracted_salary:
-                salary_snippet = job.get("salarySnippet")
-                currency = salary_snippet.get("currency") if salary_snippet else None
-                interval = (extracted_salary.get("type"),)
-                if isinstance(interval, tuple):
-                    interval = interval[0]
-                interval = interval.upper()
-                if interval in CompensationInterval.__members__:
-                    compensation = Compensation(
-                        interval=CompensationInterval[interval],
-                        min_amount=int(extracted_salary.get("min")),
-                        max_amount=int(extracted_salary.get("max")),
-                        currency=currency,
-                    )
-
             job_type = IndeedScraper.get_job_type(job)
             timestamp_seconds = job["pubDate"] / 1000
             date_posted = datetime.fromtimestamp(timestamp_seconds)
             date_posted = date_posted.strftime("%Y-%m-%d")

-            description = self.get_description(job_url)
-            with io.StringIO(job["snippet"]) as f:
-                soup_io = BeautifulSoup(f, "html.parser")
-                li_elements = soup_io.find_all("li")
-                if description is None and li_elements:
-                    description = " ".join(li.text for li in li_elements)
-
             job_post = JobPost(
                 title=job["normTitle"],
                 description=description,
                 company_name=job["company"],
+                company_url=f"{self.url}{job_detailed['employer']['relativeCompanyPageUrl']}" if job_detailed['employer'] else None,
                 location=Location(
                     city=job.get("jobLocationCity"),
                     state=job.get("jobLocationState"),
                     country=self.country,
                 ),
                 job_type=job_type,
-                compensation=compensation,
+                compensation=self.get_compensation(job, job_detailed),
                 date_posted=date_posted,
                 job_url=job_url_client,
                 emails=extract_emails_from_text(description) if description else None,
                 num_urgent_words=count_urgent_words(description)
                 if description
                 else None,
-                is_remote=self.is_remote_job(job),
+                is_remote=IndeedScraper.is_job_remote(job, job_detailed, description)
             )
             return job_post

+        workers = 10
         jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
-        with ThreadPoolExecutor(max_workers=1) as executor:
+        job_keys = [job['jobkey'] for job in jobs]
+        jobs_detailed = self.get_job_details(job_keys)
+        with ThreadPoolExecutor(max_workers=workers) as executor:
             job_results: list[Future] = [
-                executor.submit(process_job, job) for job in jobs
+                executor.submit(process_job, job, job_detailed['job']) for job, job_detailed in zip(jobs, jobs_detailed)
             ]
         job_list = [result.result() for result in job_results if result.result()]
@@ -183,26 +153,34 @@
         :param scraper_input:
         :return: job_response
         """
-        pages_to_process = (
-            math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
-        )
-
+        #: get first page to initialize session
         job_list, total_results = self.scrape_page(scraper_input, 0)
+        pages_processed = 1

-        with ThreadPoolExecutor(max_workers=1) as executor:
-            futures: list[Future] = [
-                executor.submit(self.scrape_page, scraper_input, page)
-                for page in range(1, pages_to_process + 1)
-            ]
+        while len(self.seen_urls) < scraper_input.results_wanted:
+            pages_to_process = math.ceil((scraper_input.results_wanted - len(self.seen_urls)) / self.jobs_per_page)
+            new_jobs = False

-            for future in futures:
-                jobs, _ = future.result()
+            with ThreadPoolExecutor(max_workers=10) as executor:
+                futures: list[Future] = [
+                    executor.submit(self.scrape_page, scraper_input, page + pages_processed)
+                    for page in range(pages_to_process)
+                ]

-                job_list += jobs
+                for future in futures:
+                    jobs, _ = future.result()
+                    if jobs:
+                        job_list += jobs
+                        new_jobs = True
+                    if len(self.seen_urls) >= scraper_input.results_wanted:
+                        break

-        if len(job_list) > scraper_input.results_wanted:
-            job_list = job_list[: scraper_input.results_wanted]
+            pages_processed += pages_to_process
+            if not new_jobs:
+                break
+
+        if len(self.seen_urls) > scraper_input.results_wanted:
+            job_list = job_list[:scraper_input.results_wanted]

         job_response = JobResponse(
             jobs=job_list,
@@ -219,7 +197,7 @@
         parsed_url = urllib.parse.urlparse(job_page_url)
         params = urllib.parse.parse_qs(parsed_url.query)
         jk_value = params.get("jk", [None])[0]
-        formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
+        formatted_url = f"{self.url}/m/viewjob?jk={jk_value}&spa=1"
         session = create_session(self.proxy)

         try:
@@ -236,17 +214,23 @@
             return None

         try:
-            data = json.loads(response.text)
-            job_description = data["body"]["jobInfoWrapperModel"]["jobInfoModel"][
-                "sanitizedJobDescription"
-            ]
+            soup = BeautifulSoup(response.text, 'html.parser')
+            script_tags = soup.find_all('script')
+
+            job_description = ''
+            for tag in script_tags:
+                if 'window._initialData' in tag.text:
+                    json_str = tag.text
+                    json_str = json_str.split('window._initialData=')[1]
+                    json_str = json_str.rsplit(';', 1)[0]
+                    data = json.loads(json_str)
+                    job_description = data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
+                    break
         except (KeyError, TypeError, IndexError):
             return None

         soup = BeautifulSoup(job_description, "html.parser")
-        text_content = " ".join(soup.get_text(separator=" ").split()).strip()
-
-        return text_content
+        return modify_and_get_description(soup)

     @staticmethod
     def get_job_type(job: dict) -> list[JobType] | None:
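The reworked `get_description` above no longer parses a JSON response directly; it pulls the description out of the `window._initialData` blob embedded in the mobile page. A standalone sketch of that extraction pattern (the HTML below is a toy stand-in, not a real Indeed response):

```python
import json
from bs4 import BeautifulSoup

html = '<script>window._initialData={"jobInfoWrapperModel": {"jobInfoModel": {"sanitizedJobDescription": "<p>Great job</p>"}}};</script>'
soup = BeautifulSoup(html, 'html.parser')
for tag in soup.find_all('script'):
    if 'window._initialData' in tag.text:
        # take everything after the assignment and drop the trailing semicolon
        json_str = tag.text.split('window._initialData=')[1].rsplit(';', 1)[0]
        data = json.loads(json_str)
        print(data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"])
        break
```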
@@ -267,6 +251,44 @@
             job_types.append(job_type)
         return job_types

+    @staticmethod
+    def get_compensation(job: dict, job_detailed: dict) -> Compensation:
+        """
+        Parses the job to get compensation
+        :param job:
+        :param job_detailed:
+        :return: compensation object
+        """
+        comp = job_detailed['compensation']['baseSalary']
+        if comp:
+            interval = IndeedScraper.get_correct_interval(comp['unitOfWork'])
+            if interval:
+                return Compensation(
+                    interval=interval,
+                    min_amount=round(comp['range'].get('min'), 2) if comp['range'].get('min') is not None else None,
+                    max_amount=round(comp['range'].get('max'), 2) if comp['range'].get('max') is not None else None,
+                    currency=job_detailed['compensation']['currencyCode']
+                )
+
+        extracted_salary = job.get("extractedSalary")
+        compensation = None
+        if extracted_salary:
+            salary_snippet = job.get("salarySnippet")
+            currency = salary_snippet.get("currency") if salary_snippet else None
+            interval = (extracted_salary.get("type"),)
+            if isinstance(interval, tuple):
+                interval = interval[0]
+            interval = interval.upper()
+
+            if interval in CompensationInterval.__members__:
+                compensation = Compensation(
+                    interval=CompensationInterval[interval],
+                    min_amount=int(extracted_salary.get("min")),
+                    max_amount=int(extracted_salary.get("max")),
+                    currency=currency,
+                )
+        return compensation
+
     @staticmethod
     def parse_jobs(soup: BeautifulSoup) -> dict:
         """
@@ -305,7 +327,7 @@
             raise IndeedException("Could not find mosaic provider job cards data")
         else:
             raise IndeedException(
-                "Could not find a script tag containing mosaic provider data"
+                "Could not find any results for the search"
             )
@staticmethod @staticmethod
@@ -329,26 +351,152 @@ class IndeedScraper(Scraper):
@staticmethod @staticmethod
def get_headers(): def get_headers():
return { return {
"authority": "www.indeed.com", 'Host': 'www.indeed.com',
"accept": "*/*", 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
"accept-language": "en-US,en;q=0.9", 'sec-fetch-site': 'same-origin',
"referer": "https://www.indeed.com/viewjob?jk=fe6182337d72c7b1&tk=1hcbfcmd0k62t802&from=serp&vjs=3&advn=8132938064490989&adid=408692607&ad=-6NYlbfkN0A3Osc99MJFDKjquSk4WOGT28ALb_ad4QMtrHreCb9ICg6MiSVy9oDAp3evvOrI7Q-O9qOtQTg1EPbthP9xWtBN2cOuVeHQijxHjHpJC65TjDtftH3AXeINjBvAyDrE8DrRaAXl8LD3Fs1e_xuDHQIssdZ2Mlzcav8m5jHrA0fA64ZaqJV77myldaNlM7-qyQpy4AsJQfvg9iR2MY7qeC5_FnjIgjKIy_lNi9OPMOjGRWXA94CuvC7zC6WeiJmBQCHISl8IOBxf7EdJZlYdtzgae3593TFxbkd6LUwbijAfjax39aAuuCXy3s9C4YgcEP3TwEFGQoTpYu9Pmle-Ae1tHGPgsjxwXkgMm7Cz5mBBdJioglRCj9pssn-1u1blHZM4uL1nK9p1Y6HoFgPUU9xvKQTHjKGdH8d4y4ETyCMoNF4hAIyUaysCKdJKitC8PXoYaWhDqFtSMR4Jys8UPqUV&xkcb=SoDD-_M3JLQfWnQTDh0LbzkdCdPP&xpse=SoBa6_I3JLW9FlWZlB0PbzkdCdPP&sjdu=i6xVERweJM_pVUvgf-MzuaunBTY7G71J5eEX6t4DrDs5EMPQdODrX7Nn-WIPMezoqr5wA_l7Of-3CtoiUawcHw", 'sec-fetch-dest': 'document',
"sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"', 'accept-language': 'en-US,en;q=0.9',
"sec-ch-ua-mobile": "?0", 'sec-fetch-mode': 'navigate',
"sec-ch-ua-platform": '"Windows"', 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 192.0',
"sec-fetch-dest": "empty", 'referer': 'https://www.indeed.com/m/jobs?q=software%20intern&l=Dallas%2C%20TX&from=serpso&rq=1&rsIdx=3',
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
} }
@staticmethod @staticmethod
def is_remote_job(job: dict) -> bool: def add_params(scraper_input: ScraperInput, page: int) -> dict[str, str | Any]:
# `fromage` is the posting time filter in days
fromage = max(scraper_input.hours_old // 24, 1) if scraper_input.hours_old else None
params = {
"q": scraper_input.search_term,
"l": scraper_input.location if scraper_input.location else scraper_input.country.value[0].split(',')[-1],
"filter": 0,
"start": scraper_input.offset + page * 10,
"sort": "date",
"fromage": fromage,
}
if scraper_input.distance:
params["radius"] = scraper_input.distance
sc_values = []
if scraper_input.is_remote:
sc_values.append("attr(DSQF7)")
if scraper_input.job_type:
sc_values.append("jt({})".format(scraper_input.job_type.value))
if sc_values:
params["sc"] = "0kf:" + "".join(sc_values) + ";"
if scraper_input.easy_apply:
params['iafilter'] = 1
return params
@staticmethod
def is_job_remote(job: dict, job_detailed: dict, description: str) -> bool:
remote_keywords = ['remote', 'work from home', 'wfh']
is_remote_in_attributes = any(
any(keyword in attr['label'].lower() for keyword in remote_keywords)
for attr in job_detailed['attributes']
)
is_remote_in_description = any(keyword in description.lower() for keyword in remote_keywords)
is_remote_in_location = any(
keyword in job_detailed['location']['formatted']['long'].lower()
for keyword in remote_keywords
)
is_remote_in_taxonomy = any(
taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0
for taxonomy in job.get("taxonomyAttributes", [])
)
return is_remote_in_attributes or is_remote_in_description or is_remote_in_location or is_remote_in_taxonomy
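A toy example of the attribute check (the payload shape is assumed from the code above, not taken from Indeed's documentation):
```python
# Assumed payload shape, for illustration: one attribute label mentions
# working from home, so the keyword scan flags the job as remote.
remote_keywords = ['remote', 'work from home', 'wfh']
job_detailed = {"attributes": [{"label": "Work from Home"}]}

is_remote_in_attributes = any(
    any(keyword in attr['label'].lower() for keyword in remote_keywords)
    for attr in job_detailed['attributes']
)
print(is_remote_in_attributes)  # True
```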
def get_job_details(self, job_keys: list[str]) -> dict:
"""
-:param job:
-:return: bool
Queries the GraphQL endpoint for detailed job information for the given job keys.
"""
-for taxonomy in job.get("taxonomyAttributes", []):
-if taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0:
-return True
-return False
url = "https://apis.indeed.com/graphql"
headers = {
'Host': 'apis.indeed.com',
'content-type': 'application/json',
'indeed-api-key': '161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8',
'accept': 'application/json',
'indeed-locale': 'en-US',
'accept-language': 'en-US,en;q=0.9',
'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1',
'indeed-app-info': 'appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone',
'indeed-co': 'US',
}
job_keys_gql = '[' + ', '.join(f'"{key}"' for key in job_keys) + ']'
payload = {
"query": f"""
query GetJobData {{
jobData(input: {{
jobKeys: {job_keys_gql}
}}) {{
results {{
job {{
key
title
description {{
html
}}
location {{
countryName
countryCode
city
postalCode
streetAddress
formatted {{
short
long
}}
}}
compensation {{
baseSalary {{
unitOfWork
range {{
... on Range {{
min
max
}}
}}
}}
currencyCode
}}
attributes {{
label
}}
employer {{
relativeCompanyPageUrl
}}
recruit {{
viewJobUrl
detailedSalary
workSchedule
}}
}}
}}
}}
}}
"""
}
response = requests.post(url, headers=headers, json=payload, proxies=self.proxy)
if response.status_code == 200:
return response.json()['data']['jobData']['results']
else:
return {}
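A usage sketch, assuming an initialized scraper; the job keys below are placeholders shaped like Indeed's `jk` values, and the fields read from the response follow the GraphQL query above:
```python
# Sketch only: assumes IndeedScraper is importable and constructed as elsewhere
# in the package; the keys are made-up placeholders, not real postings.
scraper = IndeedScraper(proxy=None)
results = scraper.get_job_details(["fe6182337d72c7b1", "1a2b3c4d5e6f7a8b"])
for result in results:
    job = result["job"]
    print(job["key"], "|", job["title"], "|", job["location"]["formatted"]["short"])
```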
@staticmethod
def get_correct_interval(interval: str) -> CompensationInterval:
interval_mapping = {
"DAY": "DAILY",
"YEAR": "YEARLY",
"HOUR": "HOURLY",
"WEEK": "WEEKLY",
"MONTH": "MONTHLY"
}
mapped_interval = interval_mapping.get(interval.upper(), None)
if mapped_interval and mapped_interval in CompensationInterval.__members__:
return CompensationInterval[mapped_interval]
else:
raise ValueError(f"Unsupported interval: {interval}")
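For example, Indeed's GraphQL `unitOfWork` comes back as strings like "YEAR", which this maps onto the package's `CompensationInterval` enum; anything unexpected raises:
```python
# Assumes IndeedScraper and CompensationInterval are importable as in the diff.
print(IndeedScraper.get_correct_interval("YEAR"))   # CompensationInterval.YEARLY
try:
    IndeedScraper.get_correct_interval("FORTNIGHT")
except ValueError as err:
    print(err)  # Unsupported interval: FORTNIGHT
```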

jobspy/scrapers/linkedin/__init__.py

@@ -4,26 +4,40 @@ jobspy.scrapers.linkedin
This module contains routines to scrape LinkedIn.
"""
import time
import random
from typing import Optional
from datetime import datetime
import requests
-import time
from requests.exceptions import ProxyError
from bs4 import BeautifulSoup
from bs4.element import Tag
from threading import Lock
-from bs4.element import Tag
-from bs4 import BeautifulSoup
from urllib.parse import urlparse, urlunparse
from .. import Scraper, ScraperInput, Site
-from ..utils import count_urgent_words, extract_emails_from_text, get_enum_from_job_type, currency_parser
from ..exceptions import LinkedInException
-from ...jobs import JobPost, Location, JobResponse, JobType, Country, Compensation
from ..utils import create_session
from ...jobs import (
JobPost,
Location,
JobResponse,
JobType,
Country,
Compensation
)
from ..utils import (
count_urgent_words,
extract_emails_from_text,
get_enum_from_job_type,
currency_parser,
modify_and_get_description
)
class LinkedInScraper(Scraper):
-MAX_RETRIES = 3
-DELAY = 10
DELAY = 3
def __init__(self, proxy: Optional[str] = None):
"""
@@ -45,6 +59,12 @@ class LinkedInScraper(Scraper):
url_lock = Lock()
page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0
seconds_old = (
scraper_input.hours_old * 3600
if scraper_input.hours_old
else None
)
def job_type_code(job_type_enum):
mapping = {
JobType.FULL_TIME: "F",
@@ -56,7 +76,10 @@ class LinkedInScraper(Scraper):
return mapping.get(job_type_enum, "")
-while len(job_list) < scraper_input.results_wanted and page < 1000:
continue_search = lambda: len(job_list) < scraper_input.results_wanted and page < 1000
while continue_search():
session = create_session(is_tls=False, has_retry=True, delay=5)
params = {
"keywords": scraper_input.search_term,
"location": scraper_input.location,
@@ -68,47 +91,37 @@ class LinkedInScraper(Scraper):
"pageNum": 0,
"start": page + scraper_input.offset,
"f_AL": "true" if scraper_input.easy_apply else None,
"f_C": ','.join(map(str, scraper_input.linkedin_company_ids)) if scraper_input.linkedin_company_ids else None,
"f_TPR": f"r{seconds_old}",
}
params = {k: v for k, v in params.items() if v is not None}
-retries = 0
-while retries < self.MAX_RETRIES:
-try:
-response = requests.get(
-f"{self.url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
-params=params,
-allow_redirects=True,
-proxies=self.proxy,
-timeout=10,
-)
-response.raise_for_status()
-break
-except requests.HTTPError as e:
-if hasattr(e, "response") and e.response is not None:
-if e.response.status_code in (429, 502):
-time.sleep(self.DELAY)
-retries += 1
-continue
-else:
-raise LinkedInException(
-f"bad response status code: {e.response.status_code}"
-)
-else:
-raise
-except ProxyError as e:
-raise LinkedInException("bad proxy")
-except Exception as e:
-raise LinkedInException(str(e))
-else:
-# Raise an exception if the maximum number of retries is reached
-raise LinkedInException(
-"Max retries reached, failed to get a valid response"
-)
try:
response = session.get(
f"{self.url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
params=params,
allow_redirects=True,
proxies=self.proxy,
headers=self.headers(),
timeout=10,
)
response.raise_for_status()
except requests.HTTPError as e:
raise LinkedInException(
f"bad response status code: {e.response.status_code}"
)
except ProxyError as e:
raise LinkedInException("bad proxy")
except Exception as e:
raise LinkedInException(str(e))
soup = BeautifulSoup(response.text, "html.parser")
job_cards = soup.find_all("div", class_="base-search-card")
if len(job_cards) == 0:
return JobResponse(jobs=job_list)
-for job_card in soup.find_all("div", class_="base-search-card"):
for job_card in job_cards:
job_url = None
href_tag = job_card.find("a", class_="base-card__full-link")
if href_tag and "href" in href_tag.attrs:
@@ -123,27 +136,29 @@ class LinkedInScraper(Scraper):
# Call process_job directly without threading
try:
-job_post = self.process_job(job_card, job_url)
job_post = self.process_job(job_card, job_url, scraper_input.full_description)
if job_post:
job_list.append(job_post)
except Exception as e:
raise LinkedInException("Exception occurred while processing jobs")
-page += 25
if continue_search():
time.sleep(random.uniform(LinkedInScraper.DELAY, LinkedInScraper.DELAY + 2))
page += 25
job_list = job_list[: scraper_input.results_wanted]
return JobResponse(jobs=job_list)
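Two small mechanics worth noting in this reworked loop, sketched below: `hours_old` becomes LinkedIn's `f_TPR=r<seconds>` filter, and each page fetch is followed by a randomized 3-5 second pause (`DELAY` plus up to 2 seconds of jitter):
```python
import random

# Time filter: 72 hours -> f_TPR=r259200 (seconds)
hours_old = 72
seconds_old = hours_old * 3600
print(f"f_TPR=r{seconds_old}")

# Pacing between pages, as in the loop above (DELAY = 3)
DELAY = 3
print(random.uniform(DELAY, DELAY + 2))  # e.g. 4.21 seconds
```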
-def process_job(self, job_card: Tag, job_url: str) -> Optional[JobPost]:
def process_job(self, job_card: Tag, job_url: str, full_descr: bool) -> Optional[JobPost]:
salary_tag = job_card.find('span', class_='job-search-card__salary-info')
compensation = None
if salary_tag:
-salary_text = salary_tag.get_text(separator=' ').strip()
salary_text = salary_tag.get_text(separator=" ").strip()
-salary_values = [currency_parser(value) for value in salary_text.split('-')]
salary_values = [currency_parser(value) for value in salary_text.split("-")]
salary_min = salary_values[0]
salary_max = salary_values[1]
-currency = salary_text[0] if salary_text[0] != '$' else 'USD'
currency = salary_text[0] if salary_text[0] != "$" else "USD"
compensation = Compensation(
min_amount=int(salary_min),
@@ -171,7 +186,7 @@ class LinkedInScraper(Scraper):
if metadata_card
else None
)
-date_posted = None
date_posted = description = job_type = None
if datetime_tag and "datetime" in datetime_tag.attrs:
datetime_str = datetime_tag["datetime"]
try:
@@ -180,21 +195,20 @@ class LinkedInScraper(Scraper):
date_posted = None
benefits_tag = job_card.find("span", class_="result-benefits__text")
benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
if full_descr:
description, job_type = self.get_job_description(job_url)
-# description, job_type = None, []
return JobPost(
title=title,
-description=description,
company_name=company,
company_url=company_url,
location=location,
date_posted=date_posted,
job_url=job_url,
-job_type=job_type,
compensation=compensation,
benefits=benefits,
job_type=job_type,
description=description,
emails=extract_emails_from_text(description) if description else None,
num_urgent_words=count_urgent_words(description) if description else None,
)
@@ -208,12 +222,10 @@ class LinkedInScraper(Scraper):
:return: description or None
"""
try:
-response = requests.get(job_page_url, timeout=5, proxies=self.proxy)
session = create_session(is_tls=False, has_retry=True)
response = session.get(job_page_url, timeout=5, proxies=self.proxy)
response.raise_for_status()
except requests.HTTPError as e:
-if hasattr(e, "response") and e.response is not None:
-if e.response.status_code in (429, 502):
-time.sleep(self.DELAY)
return None, None
except Exception as e:
return None, None
@@ -227,7 +239,7 @@ class LinkedInScraper(Scraper):
description = None
if div_content:
-description = " ".join(div_content.get_text().split()).strip()
description = modify_and_get_description(div_content)
def get_job_type(
soup_job_type: BeautifulSoup,
@@ -287,3 +299,21 @@ class LinkedInScraper(Scraper):
)
return location
@staticmethod
def headers() -> dict:
return {
"authority": "www.linkedin.com",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"accept-language": "en-US,en;q=0.9",
"cache-control": "max-age=0",
"sec-ch-ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
# 'sec-ch-ua-mobile': '?0',
# 'sec-ch-ua-platform': '"macOS"',
# 'sec-fetch-dest': 'document',
# 'sec-fetch-mode': 'navigate',
# 'sec-fetch-site': 'none',
# 'sec-fetch-user': '?1',
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}

jobspy/scrapers/utils.py

@@ -1,11 +1,22 @@
import re
import numpy as np
import requests
import tls_client
-import requests
from requests.adapters import HTTPAdapter, Retry
from ..jobs import JobType
def modify_and_get_description(soup):
for li in soup.find_all('li'):
li.string = "- " + li.get_text()
description = soup.get_text(separator='\n').strip()
description = re.sub(r'\n+', '\n', description)
return description
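A quick check of the helper on made-up description HTML: `<li>` items become `- ` bullets and the text collapses to single newlines:
```python
from bs4 import BeautifulSoup

# Made-up input; modify_and_get_description is the helper defined above.
html = "<div><p>Perks</p><ul><li>Remote friendly</li><li>401k match</li></ul></div>"
soup = BeautifulSoup(html, "html.parser")
print(modify_and_get_description(soup))
# Perks
# - Remote friendly
# - 401k match
```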
def count_urgent_words(description: str) -> int:
"""
Count the number of urgent words or phrases in a job description.
@@ -27,11 +38,11 @@ def extract_emails_from_text(text: str) -> list[str] | None:
return email_regex.findall(text)
-def create_session(proxy: dict | None = None, is_tls: bool = True):
def create_session(proxy: dict | None = None, is_tls: bool = True, has_retry: bool = False, delay: int = 1) -> requests.Session:
"""
-Creates a tls client session
Creates a requests session with optional tls, proxy, and retry settings.
-:return: A session object with or without proxies.
:return: A session object
"""
if is_tls:
session = tls_client.Session(
@@ -39,17 +50,21 @@ def create_session(proxy: dict | None = None, is_tls: bool = True):
random_tls_extension_order=True,
)
session.proxies = proxy
-# TODO multiple proxies
-# if self.proxies:
-# session.proxies = {
-# "http": random.choice(self.proxies),
-# "https": random.choice(self.proxies),
-# }
else:
session = requests.Session()
session.allow_redirects = True
if proxy:
session.proxies.update(proxy)
if has_retry:
retries = Retry(total=3,
connect=3,
status=3,
status_forcelist=[500, 502, 503, 504, 429],
backoff_factor=delay)
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
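A sketch of the retry path, as the LinkedIn scraper now uses it: a plain `requests` session whose adapter retries 429s and 5xxs up to three times with backoff instead of failing on the first bad response (the URL is a placeholder):
```python
# Assumes create_session is imported from jobspy.scrapers.utils as above.
session = create_session(proxy=None, is_tls=False, has_retry=True, delay=5)
response = session.get("https://example.com/", timeout=10)  # placeholder URL
print(response.status_code)
```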

jobspy/scrapers/ziprecruiter/__init__.py

@@ -6,8 +6,7 @@ This module contains routines to scrape ZipRecruiter.
"""
import math
import time
-import re
-from datetime import datetime, date
from datetime import datetime, timezone
from typing import Optional, Tuple, Any
from bs4 import BeautifulSoup
@@ -15,8 +14,8 @@ from concurrent.futures import ThreadPoolExecutor
from .. import Scraper, ScraperInput, Site
from ..exceptions import ZipRecruiterException
-from ..utils import count_urgent_words, extract_emails_from_text, create_session
from ...jobs import JobPost, Compensation, Location, JobResponse, JobType, Country
from ..utils import count_urgent_words, extract_emails_from_text, create_session, modify_and_get_description
class ZipRecruiterScraper(Scraper):
@@ -26,10 +25,13 @@ class ZipRecruiterScraper(Scraper):
"""
site = Site(Site.ZIP_RECRUITER)
self.url = "https://www.ziprecruiter.com"
self.session = create_session(proxy)
self.get_cookies()
super().__init__(site, proxy=proxy)
self.jobs_per_page = 20
self.seen_urls = set()
self.delay = 5
def find_jobs_in_page(
self, scraper_input: ScraperInput, continue_token: str | None = None
@@ -42,14 +44,12 @@ class ZipRecruiterScraper(Scraper):
"""
params = self.add_params(scraper_input)
if continue_token:
-params["continue"] = continue_token
params["continue_from"] = continue_token
try:
-session = create_session(self.proxy, is_tls=False)
-response = session.get(
response = self.session.get(
f"https://api.ziprecruiter.com/jobs-app/jobs",
headers=self.headers(),
-params=self.add_params(scraper_input),
-timeout=10,
params=params
)
if response.status_code != 200:
raise ZipRecruiterException(
@@ -60,7 +60,6 @@ class ZipRecruiterScraper(Scraper):
raise ZipRecruiterException("bad proxy")
raise ZipRecruiterException(str(e))
-time.sleep(5)
response_data = response.json()
jobs_list = response_data.get("jobs", [])
next_continue_token = response_data.get("continue", None)
@@ -68,7 +67,7 @@ class ZipRecruiterScraper(Scraper):
with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
job_results = [executor.submit(self.process_job, job) for job in jobs_list]
-job_list = [result.result() for result in job_results if result.result()]
job_list = list(filter(None, (result.result() for result in job_results)))
return job_list, next_continue_token
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
@@ -86,6 +85,9 @@ class ZipRecruiterScraper(Scraper):
if len(job_list) >= scraper_input.results_wanted:
break
if page > 1:
time.sleep(self.delay)
jobs_on_page, continue_token = self.find_jobs_in_page(
scraper_input, continue_token
)
@@ -95,22 +97,21 @@ class ZipRecruiterScraper(Scraper):
if not continue_token:
break
-if len(job_list) > scraper_input.results_wanted:
-job_list = job_list[: scraper_input.results_wanted]
-return JobResponse(jobs=job_list)
return JobResponse(jobs=job_list[: scraper_input.results_wanted])
-@staticmethod
-def process_job(job: dict) -> JobPost:
def process_job(self, job: dict) -> JobPost | None:
"""Processes an individual job dict from the response"""
title = job.get("name")
-job_url = job.get("job_url")
job_url = f"https://www.ziprecruiter.com/jobs//j?lvk={job['listing_key']}"
if job_url in self.seen_urls:
return
self.seen_urls.add(job_url)
-description = BeautifulSoup(
-job.get("job_description", "").strip(), "html.parser"
-).get_text()
job_description_html = job.get("job_description", "").strip()
description_soup = BeautifulSoup(job_description_html, "html.parser")
description = modify_and_get_description(description_soup)
-company = job["hiring_company"].get("name") if "hiring_company" in job else None
company = job.get("hiring_company", {}).get("name")
country_value = "usa" if job.get("job_country") == "US" else "canada"
country_enum = Country.from_string(country_value)
@@ -120,17 +121,7 @@ class ZipRecruiterScraper(Scraper):
job_type = ZipRecruiterScraper.get_job_type_enum(
job.get("employment_type", "").replace("_", "").lower()
)
date_posted = datetime.fromisoformat(job['posted_time'].rstrip("Z")).date()
-save_job_url = job.get("SaveJobURL", "")
-posted_time_match = re.search(
-r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
-)
-if posted_time_match:
-date_time_str = posted_time_match.group(1)
-date_posted_obj = datetime.strptime(date_time_str, "%Y-%m-%dT%H:%M:%SZ")
-date_posted = date_posted_obj.date()
-else:
-date_posted = date.today()
return JobPost(
title=title,
@@ -156,6 +147,11 @@ class ZipRecruiterScraper(Scraper):
num_urgent_words=count_urgent_words(description) if description else None,
)
def get_cookies(self):
url="https://api.ziprecruiter.com/jobs-app/event"
data="event_type=session&logged_in=false&number_of_retry=1&property=model%3AiPhone&property=os%3AiOS&property=locale%3Aen_us&property=app_build_number%3A4734&property=app_version%3A91.0&property=manufacturer%3AApple&property=timestamp%3A2024-01-12T12%3A04%3A42-06%3A00&property=screen_height%3A852&property=os_version%3A16.6.1&property=source%3Ainstall&property=screen_width%3A393&property=device_model%3AiPhone%2014%20Pro&property=brand%3AApple"
self.session.post(url, data=data, headers=ZipRecruiterScraper.headers())
@staticmethod
def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
for job_type in JobType:
@@ -168,8 +164,10 @@ class ZipRecruiterScraper(Scraper):
params = {
"search": scraper_input.search_term,
"location": scraper_input.location,
-"form": "jobs-landing",
}
if scraper_input.hours_old:
fromage = max(scraper_input.hours_old // 24, 1) if scraper_input.hours_old else None
params['days'] = fromage
job_type_value = None
if scraper_input.job_type:
if scraper_input.job_type.value == "fulltime":
@@ -178,6 +176,8 @@ class ZipRecruiterScraper(Scraper):
job_type_value = "part_time"
else:
job_type_value = scraper_input.job_type.value
if scraper_input.easy_apply:
params['zipapply'] = 1
if job_type_value:
params[
@@ -190,6 +190,8 @@ class ZipRecruiterScraper(Scraper):
if scraper_input.distance:
params["radius"] = scraper_input.distance
params = {k: v for k, v in params.items() if v is not None}
return params
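A hypothetical result, tying the new pieces together: a 48-hour window floor-divides to `days=2`, and Easy Apply toggles `zipapply`:
```python
# Illustrative values only; this mirrors what add_params would return for
# hours_old=48 and easy_apply=True with no other filters set.
params = {
    "search": "software engineer",  # scraper_input.search_term
    "location": "Dallas, TX",       # scraper_input.location
    "days": max(48 // 24, 1),       # -> 2
    "zipapply": 1,                  # easy_apply
}
print(params)
```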
@staticmethod
@@ -200,7 +202,6 @@ class ZipRecruiterScraper(Scraper):
"""
return {
"Host": "api.ziprecruiter.com",
-"Cookie": "ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; SplitSV=2016-10-19%3AU2FsdGVkX19f9%2Bx70knxc%2FeR3xXR8lWoTcYfq5QjmLU%3D%0A; __cf_bm=qXim3DtLPbOL83GIp.ddQEOFVFTc1OBGPckiHYxcz3o-1698521532-0-AfUOCkgCZyVbiW1ziUwyefCfzNrJJTTKPYnif1FZGQkT60dMowmSU/Y/lP+WiygkFPW/KbYJmyc+MQSkkad5YygYaARflaRj51abnD+SyF9V; zglobalid=68d49bd5-0326-428e-aba8-8a04b64bc67c.af2d99ff7c03.653d61bb; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38",
"accept": "*/*",
"x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
"x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",