2015-12-03 00:50:34 +01:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
2023-09-11 16:30:55 +02:00
|
|
|
# Copyright 2015-2023 Mike Fährmann
|
2015-12-03 00:50:34 +01:00
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License version 2 as
|
|
|
|
# published by the Free Software Foundation.
|
|
|
|
|
2016-10-01 15:54:27 +02:00
|
|
|
"""Recursive extractor"""
|
2015-12-03 00:50:34 +01:00
|
|
|
|
|
|
|
from .common import Extractor, Message
|
2018-09-16 20:59:22 +02:00
|
|
|
import re
|
2015-12-03 00:50:34 +01:00
|
|
|
|
2017-02-01 00:53:19 +01:00
|
|
|
|
2016-10-01 15:54:27 +02:00
|
|
|
class RecursiveExtractor(Extractor):
    """Extractor that fetches URLs from a remote or local source"""
    category = "recursive"
    pattern = r"r(?:ecursive)?:"
    example = "recursive:https://pastebin.com/raw/FLwrCYsT"

    def items(self):
        """Yield a queue message for every http(s) URL found in the source.

        The text after the ``r:`` / ``recursive:`` prefix is either a
        ``file://`` path, read from the local filesystem, or a remote
        URL fetched with ``self.request()``. Every http(s) URL found in
        the resulting page is re-queued so other extractors can handle it.
        """
        # strip the "r:" / "recursive:" scheme prefix
        url = self.url.partition(":")[2]

        if url.startswith("file://"):
            # Read the local file with an explicit encoding so the result
            # does not depend on the platform's locale default
            # (e.g. cp1252 on Windows would mangle UTF-8 content).
            with open(url[7:], encoding="utf-8") as fp:
                page = fp.read()
        else:
            page = self.request(url).text

        # Queue every http(s) URL; metadata dict is intentionally empty.
        for match in re.finditer(r"https?://[^\s\"']+", page):
            yield Message.Queue, match.group(0), {}
|