mirror of https://github.com/Bunsly/JobSpy
parent: ff1eb0f7b0
commit: ba5ed803ca
@@ -1053,6 +1053,16 @@ files = [
     {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
     {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
     {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
+    {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
@@ -1243,36 +1253,39 @@ test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"
 
 [[package]]
 name = "numpy"
-version = "1.25.2"
+version = "1.24.2"
 description = "Fundamental package for array computing in Python"
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
 files = [
-    {file = "numpy-1.25.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3"},
-    {file = "numpy-1.25.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f"},
-    {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187"},
-    {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357"},
-    {file = "numpy-1.25.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9"},
-    {file = "numpy-1.25.2-cp310-cp310-win32.whl", hash = "sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044"},
-    {file = "numpy-1.25.2-cp310-cp310-win_amd64.whl", hash = "sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545"},
-    {file = "numpy-1.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418"},
-    {file = "numpy-1.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f"},
-    {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2"},
-    {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf"},
-    {file = "numpy-1.25.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364"},
-    {file = "numpy-1.25.2-cp311-cp311-win32.whl", hash = "sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d"},
-    {file = "numpy-1.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4"},
-    {file = "numpy-1.25.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b79e513d7aac42ae918db3ad1341a015488530d0bb2a6abcbdd10a3a829ccfd3"},
-    {file = "numpy-1.25.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb942bfb6f84df5ce05dbf4b46673ffed0d3da59f13635ea9b926af3deb76926"},
-    {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0746410e73384e70d286f93abf2520035250aad8c5714240b0492a7302fdca"},
-    {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7806500e4f5bdd04095e849265e55de20d8cc4b661b038957354327f6d9b295"},
-    {file = "numpy-1.25.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8b77775f4b7df768967a7c8b3567e309f617dd5e99aeb886fa14dc1a0791141f"},
-    {file = "numpy-1.25.2-cp39-cp39-win32.whl", hash = "sha256:2792d23d62ec51e50ce4d4b7d73de8f67a2fd3ea710dcbc8563a51a03fb07b01"},
-    {file = "numpy-1.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:76b4115d42a7dfc5d485d358728cdd8719be33cc5ec6ec08632a5d6fca2ed380"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901"},
-    {file = "numpy-1.25.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf"},
-    {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"},
+    {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"},
+    {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"},
+    {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"},
+    {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"},
+    {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = "sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"},
+    {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"},
+    {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"},
+    {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"},
+    {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"},
+    {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"},
+    {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"},
+    {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"},
+    {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"},
+    {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"},
+    {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"},
+    {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"},
+    {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = "sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"},
+    {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"},
+    {file = "numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"},
+    {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"},
+    {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"},
+    {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"},
+    {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"},
+    {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"},
+    {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"},
+    {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"},
 ]
 
 [[package]]
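This hunk pins numpy back from 1.25.2 to 1.24.2 and widens that package's own floor to python-versions ">=3.8", even though the [metadata] section below still resolves the project for "^3.10". A minimal sketch for confirming the installed wheel matches this lock after a standard `poetry install` (the expected string is taken from this hunk; the check itself is not part of the commit):

# Sanity-check the resolved numpy against this poetry.lock hunk.
from importlib.metadata import version  # stdlib, Python 3.8+

expected = "1.24.2"  # version pinned by this lock
installed = version("numpy")
assert installed == expected, f"numpy {installed} != locked {expected}"
print(f"numpy {installed} matches poetry.lock")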
@@ -2432,4 +2445,4 @@ files = [
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "0c50057af9ebbbe5c124c81758b41f05c05636739c3d1747e1bac74e75a046cb"
+content-hash = "f966f3979873eec2c3b13460067f5aa414c69aa8ab5cd3239c1cfa564fcb5deb"
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.14"
+version = "1.1.15"
 description = "Job scraper for LinkedIn, Indeed & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"
@@ -42,23 +42,22 @@ class ZipRecruiterScraper(Scraper):
         self.jobs_per_page = 20
         self.seen_urls = set()
 
-    def find_jobs_in_page(
-        self, scraper_input: ScraperInput, page: int
-    ) -> list[JobPost]:
+    def find_jobs_in_page(self, scraper_input: ScraperInput, continue_token: Optional[str] = None) -> Tuple[list[JobPost], Optional[str]]:
         """
         Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
         :param scraper_input:
-        :param page:
         :return: jobs found on page
         """
-        session = create_session(self.proxy)
+        params = self.add_params(scraper_input)
+        if continue_token:
+            params['continue'] = continue_token
         try:
-            response = session.get(
-                f"{self.url}/jobs-search",
+            response = requests.get(
+                f"https://api.ziprecruiter.com/jobs-app/jobs",
                 headers=self.headers(),
-                params=self.add_params(scraper_input, page),
+                params=self.add_params(scraper_input),
                 allow_redirects=True,
-                timeout_seconds=10,
+                timeout=10,
             )
             if response.status_code != 200:
                 raise ZipRecruiterException(
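Two things stand out in this hunk: paging moves from a numeric `page` against the HTML site to a `continue` cursor against the mobile API, and, as written, the `params` dict that receives the cursor is never sent; the `requests.get` call rebuilds `self.add_params(scraper_input)`, so the token is dropped. A minimal sketch of the intended cursor pagination, with the endpoint and the "jobs"/"continue" keys taken from this diff (a pattern sketch, not a drop-in method):

# Cursor-based pagination as this hunk intends it.
import requests

API_URL = "https://api.ziprecruiter.com/jobs-app/jobs"

def fetch_page(params: dict, headers: dict, continue_token: str | None = None) -> tuple[list, str | None]:
    if continue_token:
        params = {**params, "continue": continue_token}
    # Pass the merged dict, not a freshly built one, so the cursor survives.
    response = requests.get(API_URL, headers=headers, params=params, timeout=10)
    response.raise_for_status()
    data = response.json()
    return data.get("jobs", []), data.get("continue")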
@@ -68,118 +67,65 @@ class ZipRecruiterScraper(Scraper):
             if "Proxy responded with non 200 code" in str(e):
                 raise ZipRecruiterException("bad proxy")
             raise ZipRecruiterException(str(e))
-        else:
-            soup = BeautifulSoup(response.text, "html.parser")
-            js_tag = soup.find("script", {"id": "js_variables"})
-
-            if js_tag:
-                page_json = json.loads(js_tag.string)
-                jobs_list = page_json.get("jobList")
-                if jobs_list:
-                    page_variant = "javascript"
-                    # print('type javascript', len(jobs_list))
-                else:
-                    page_variant = "html_2"
-                    jobs_list = soup.find_all("div", {"class": "job_content"})
-                    # print('type 2 html', len(jobs_list))
-            else:
-                page_variant = "html_1"
-                jobs_list = soup.find_all("li", {"class": "job-listing"})
-                # print('type 1 html', len(jobs_list))
+
+        response_data = response.json()
+        jobs_list = response_data.get("jobs", [])
+        next_continue_token = response_data.get('continue', None)
 
         with ThreadPoolExecutor(max_workers=10) as executor:
-            if page_variant == "javascript":
-                job_results = [
-                    executor.submit(self.process_job_javascript, job)
-                    for job in jobs_list
-                ]
-            elif page_variant == "html_1":
-                job_results = [
-                    executor.submit(self.process_job_html_1, job) for job in jobs_list
-                ]
-            elif page_variant == "html_2":
-                job_results = [
-                    executor.submit(self.process_job_html_2, job) for job in jobs_list
-                ]
+            job_results = [
+                executor.submit(self.process_job, job)
+                for job in jobs_list
+            ]
 
         job_list = [result.result() for result in job_results if result.result()]
-        return job_list
+        return job_list, next_continue_token
 
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
-        Scrapes ZipRecruiter for jobs with scraper_input criteria
-        :param scraper_input:
-        :return: job_response
+        Scrapes ZipRecruiter for jobs with scraper_input criteria.
+        :param scraper_input: Information about job search criteria.
+        :return: JobResponse containing a list of jobs.
         """
-        start_page = (
-            (scraper_input.offset // self.jobs_per_page) + 1
-            if scraper_input.offset
-            else 1
-        )
-        #: get first page to initialize session
-        job_list: list[JobPost] = self.find_jobs_in_page(scraper_input, start_page)
-        pages_to_process = max(
-            3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
-        )
+        job_list: list[JobPost] = []
+        continue_token = None
 
-        with ThreadPoolExecutor(max_workers=10) as executor:
-            futures: list[Future] = [
-                executor.submit(self.find_jobs_in_page, scraper_input, page)
-                for page in range(start_page + 1, start_page + pages_to_process + 2)
-            ]
+        max_pages = math.ceil(scraper_input.results_wanted / self.jobs_per_page)
 
-            for future in futures:
-                jobs = future.result()
+        for page in range(1, max_pages + 1):
+            if len(job_list) >= scraper_input.results_wanted:
+                break
 
-                job_list += jobs
+            jobs_on_page, continue_token = self.find_jobs_in_page(scraper_input, continue_token)
+            if jobs_on_page:
+                job_list.extend(jobs_on_page)
+
+            if not continue_token:
+                break
 
         if len(job_list) > scraper_input.results_wanted:
             job_list = job_list[:scraper_input.results_wanted]
 
         return JobResponse(jobs=job_list)
 
-    def process_job_javascript(self, job: dict) -> JobPost:
+    def process_job(self, job: dict) -> JobPost:
         """the most common type of jobs page on ZR"""
-        title = job.get("Title")
-        job_url = job.get("JobURL")
-
-        description, updated_job_url = self.get_description(job_url)
+        title = job.get("name")
+        job_url = job.get("job_url")
         # job_url = updated_job_url if updated_job_url else job_url
-        if description is None:
-            description = BeautifulSoup(
-                job.get("Snippet", "").strip(), "html.parser"
-            ).get_text()
+        description = BeautifulSoup(
+            job.get("job_description", "").strip(), "html.parser"
+        ).get_text()
 
-        company = job.get("OrgName")
+        company = job.get("source")
         location = Location(
-            city=job.get("City"), state=job.get("State"), country=Country.US_CANADA
+            city=job.get("job_city"), state=job.get("job_state"), country='usa' if job.get("job_country") == 'US' else 'canada'
         )
         job_type = ZipRecruiterScraper.get_job_type_enum(
-            job.get("EmploymentType", "").replace("-", "").lower()
+            job.get("employment_type", "").replace("_", "").lower()
         )
-
-        formatted_salary = job.get("FormattedSalaryShort", "")
-        salary_parts = formatted_salary.split(" ")
-
-        min_salary_str = salary_parts[0][1:].replace(",", "")
-        if "." in min_salary_str:
-            min_amount = int(float(min_salary_str) * 1000)
-        else:
-            min_amount = int(min_salary_str.replace("K", "000"))
-
-        if len(salary_parts) >= 3 and salary_parts[2].startswith("$"):
-            max_salary_str = salary_parts[2][1:].replace(",", "")
-            if "." in max_salary_str:
-                max_amount = int(float(max_salary_str) * 1000)
-            else:
-                max_amount = int(max_salary_str.replace("K", "000"))
-        else:
-            max_amount = 0
-
-        compensation = Compensation(
-            interval=CompensationInterval.YEARLY,
-            min_amount=min_amount,
-            max_amount=max_amount,
-            currency="USD/CAD",
-        )
         save_job_url = job.get("SaveJobURL", "")
         posted_time_match = re.search(
             r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
@@ -196,7 +142,7 @@ class ZipRecruiterScraper(Scraper):
             company_name=company,
             location=location,
             job_type=job_type,
-            compensation=compensation,
+            # compensation=compensation,
             date_posted=date_posted,
             job_url=job_url,
             description=description,
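With the switch to the JSON payload, the commit deletes the "FormattedSalaryShort" parsing and comments out the `compensation=` kwarg rather than mapping a replacement field. For reference, a condensed, self-contained version of the parsing being removed, faithful to the original branches (strings like "$40K to $60K"; a token containing "." was multiplied by 1000):

# Condensed form of the salary parsing this commit removes.
def parse_formatted_salary(formatted: str) -> tuple[int, int]:
    parts = formatted.split(" ")

    def to_amount(token: str) -> int:
        s = token[1:].replace(",", "")  # strip the leading "$"
        if "." in s:
            return int(float(s) * 1000)  # e.g. "$32.5" -> 32500, per the old branch
        return int(s.replace("K", "000"))  # e.g. "$40K" -> 40000

    min_amount = to_amount(parts[0])
    max_amount = to_amount(parts[2]) if len(parts) >= 3 and parts[2].startswith("$") else 0
    return min_amount, max_amount

assert parse_formatted_salary("$40K to $60K") == (40000, 60000)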
@@ -204,95 +150,6 @@ class ZipRecruiterScraper(Scraper):
             num_urgent_words=count_urgent_words(description) if description else None,
         )
 
-    def process_job_html_2(self, job: Tag) -> Optional[JobPost]:
-        """
-        second most common type of jobs page on ZR after process_job_javascript()
-        Parses a job from the job content tag for a second variat of HTML that ZR uses
-        :param job: BeautifulSoup Tag for one job post
-        :return JobPost
-        """
-        job_url = job.find("a", class_="job_link")["href"]
-        title = job.find("h2", class_="title").text
-        company = job.find("a", class_="company_name").text.strip()
-
-        description, updated_job_url = self.get_description(job_url)
-        # job_url = updated_job_url if updated_job_url else job_url
-        if description is None:
-            description = job.find("p", class_="job_snippet").get_text().strip()
-
-        job_type_text = job.find("li", class_="perk_item perk_type")
-        job_type = None
-        if job_type_text:
-            job_type_text = (
-                job_type_text.get_text()
-                .strip()
-                .lower()
-                .replace("-", "")
-                .replace(" ", "")
-            )
-            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
-        date_posted = ZipRecruiterScraper.get_date_posted(job)
-
-        job_post = JobPost(
-            title=title,
-            company_name=company,
-            location=ZipRecruiterScraper.get_location(job),
-            job_type=job_type,
-            compensation=ZipRecruiterScraper.get_compensation(job),
-            date_posted=date_posted,
-            job_url=job_url,
-            description=description,
-            emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
-        )
-        return job_post
-
-    def process_job_html_1(self, job: Tag) -> Optional[JobPost]:
-        """
-        TODO this method isnt finished due to not encountering this type of html often
-        least common type of jobs page on ZR (rarely found)
-        Parses a job from the job content tag
-        :param job: BeautifulSoup Tag for one job post
-        :return JobPost
-        """
-        job_url = job.find("a", {"class": "job_link"})["href"]
-        # job_url = self.cleanurl(job.find("a", {"class": "job_link"})["href"])
-        if job_url in self.seen_urls:
-            return None
-
-        title = job.find("h2", {"class": "title"}).text
-        company = job.find("a", {"class": "company_name"}).text.strip()
-
-        description, _ = self.get_description(job_url)
-        # job_url = updated_job_url if updated_job_url else job_url
-        # get description from jobs listing page if get_description from the specific job page fails
-        if description is None:
-            description = job.find("p", {"class": "job_snippet"}).text.strip()
-
-        job_type_element = job.find("li", {"class": "perk_item perk_type"})
-        job_type = None
-        if job_type_element:
-            job_type_text = (
-                job_type_element.text.strip().lower().replace("_", "").replace(" ", "")
-            )
-            job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
-
-        date_posted = ZipRecruiterScraper.get_date_posted(job)
-
-        job_post = JobPost(
-            title=title,
-            description=description,
-            company_name=company,
-            location=ZipRecruiterScraper.get_location(job),
-            job_type=job_type,
-            compensation=ZipRecruiterScraper.get_compensation(job),
-            date_posted=date_posted,
-            job_url=job_url,
-            emails=extract_emails_from_text(description),
-            num_urgent_words=count_urgent_words(description),
-        )
-        return job_post
-
     @staticmethod
     def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
         for job_type in JobType:
@@ -300,39 +157,11 @@ class ZipRecruiterScraper(Scraper):
             return [job_type]
         return None
 
-    def get_description(self, job_page_url: str) -> Tuple[str | None, str | None]:
-        """
-        Retrieves job description by going to the job page url
-        :param job_page_url:
-        :return: description or None, response url
-        """
-        try:
-            session = create_session(self.proxy)
-            response = session.get(
-                job_page_url,
-                headers=self.headers(),
-                allow_redirects=True,
-                timeout_seconds=5,
-            )
-            if response.status_code not in range(200, 400):
-                return None, None
-        except Exception as e:
-            return None, None
-
-        html_string = response.content
-        soup_job = BeautifulSoup(html_string, "html.parser")
-
-        job_description_div = soup_job.find("div", {"class": "job_description"})
-        if job_description_div:
-            return job_description_div.text.strip(), response.url
-        return None, response.url
-
     @staticmethod
-    def add_params(scraper_input, page) -> dict[str, str | Any]:
+    def add_params(scraper_input) -> dict[str, str | Any]:
         params = {
             "search": scraper_input.search_term,
             "location": scraper_input.location,
-            "page": page,
             "form": "jobs-landing",
         }
         job_type_value = None
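`add_params` no longer emits a "page" key; paging is carried entirely by the "continue" cursor merged in `find_jobs_in_page`. A quick illustration of the query string the new parameters produce (the search term, location, and cursor value here are made-up examples):

# Illustrative only: the query string the new params produce.
from urllib.parse import urlencode

params = {
    "search": "software engineer",  # hypothetical search_term
    "location": "New York, NY",     # hypothetical location
    "form": "jobs-landing",
    "continue": "abc123",           # hypothetical cursor from the previous page
}
print(f"https://api.ziprecruiter.com/jobs-app/jobs?{urlencode(params)}")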
@@ -465,11 +294,13 @@ class ZipRecruiterScraper(Scraper):
         :return: dict - Dictionary containing headers
         """
         return {
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
+            'Host': 'api.ziprecruiter.com',
+            'Cookie': 'ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; SplitSV=2016-10-19%3AU2FsdGVkX19f9%2Bx70knxc%2FeR3xXR8lWoTcYfq5QjmLU%3D%0A; __cf_bm=qXim3DtLPbOL83GIp.ddQEOFVFTc1OBGPckiHYxcz3o-1698521532-0-AfUOCkgCZyVbiW1ziUwyefCfzNrJJTTKPYnif1FZGQkT60dMowmSU/Y/lP+WiygkFPW/KbYJmyc+MQSkkad5YygYaARflaRj51abnD+SyF9V; zglobalid=68d49bd5-0326-428e-aba8-8a04b64bc67c.af2d99ff7c03.653d61bb; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38',
+            'accept': '*/*',
+            'x-zr-zva-override': '100000000;vid:ZT1huzm_EQlDTVEc',
+            'x-pushnotificationid': '0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0',
+            'x-deviceid': 'D77B3A92-E589-46A4-8A39-6EF6F1D86006',
+            'user-agent': 'Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)',
+            'authorization': 'Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==',
+            'accept-language': 'en-US,en;q=0.9'
         }
-
-    # @staticmethod
-    # def cleanurl(url) -> str:
-    #     parsed_url = urlparse(url)
-    #
-    #     return urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, parsed_url.params, '', ''))
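These headers imitate the ZipRecruiter iOS app ("Job Search/87.0"). The `authorization` value is ordinary HTTP Basic auth, a base64-encoded client id with an empty password, which can be confirmed with the stdlib; the `Cookie`, `x-deviceid`, and push-notification id look session-specific and will presumably go stale:

# Decode the Basic credential hardcoded in the headers above.
import base64

token = "YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg=="
print(base64.b64decode(token).decode())  # a uuid-style client id followed by ":" (empty password)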