
readability-lxml Source Code Analysis (3): `readability.py`

#!/usr/bin/env python
from __future__ import print_function
import logging
import re
import sys

from lxml.etree import tounicode
from lxml.etree import _ElementTree
from lxml.html import document_fromstring
from lxml.html import fragment_fromstring
from lxml.html import HtmlElement

from .cleaners import clean_attributes
from .cleaners import html_cleaner
from .htmls import build_doc
from .htmls import get_body
from .htmls import get_title
from .htmls import get_author
from .htmls import shorten_title
from .compat import str_, bytes_, tostring_, pattern_type
from .debug import describe, text_content


log = logging.getLogger("readability.readability")

# Note: judging from the code, the meanings of "definite" and "possible" are reversed here:
# the definite positive/negative class names merely add or subtract weight,
# while possible positive class names are kept and possible negative ones are removed.
REGEXES = {
    # Possible negative class names
    "unlikelyCandidatesRe": re.compile(
        r"combx|comment|community|disqus|extra|foot|header|menu|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup|tweet|twitter",
        re.I,
    ),
    # Possible positive class names
    "okMaybeItsACandidateRe": re.compile(r"and|article|body|column|main|shadow", re.I),
    # Definite positive class names
    "positiveRe": re.compile(
        r"article|body|content|entry|hentry|main|page|pagination|post|text|blog|story",
        re.I,
    ),
    # Definite negative class names
    "negativeRe": re.compile(
        r"combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget",
        re.I,
    ),
    # A <div> that contains none of the following elements should be converted to a <p>
    "divToPElementsRe": re.compile(
        r"<(a|blockquote|dl|div|img|ol|p|pre|table|ul)", re.I
    ),
    #'replaceBrsRe': re.compile(r'(<br[^>]*>[ \n\r\t]*){2,}',re.I),
    #'replaceFontsRe': re.compile(r'<(\/?)font[^>]*>',re.I),
    #'trimRe': re.compile(r'^\s+|\s+$/'),
    #'normalizeRe': re.compile(r'\s{2,}/'),
    #'killBreaksRe': re.compile(r'(<br\s*\/?>(\s|&nbsp;?)*){1,}/'),
    "videoRe": re.compile(r"https?:\/\/(www\.)?(youtube|vimeo)\.com", re.I),
    # skipFootnoteLink:      /^\s*(\[?[a-z0-9]{1,2}\]?|^|edit|citation needed)\s*$/i,
}


class Unparseable(ValueError):
    pass

# Convert a font-size string to an integer (in pixels)
def to_int(x):
    if not x:
        return None
    x = x.strip()
    # If the unit is px, return the numeric value directly
    if x.endswith("px"):
        return int(x[:-2])
    # If the unit is em, multiply by 12 (1em is treated as 12px)
    if x.endswith("em"):
        return int(x[:-2]) * 12
    return int(x)
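
# A quick sketch of how to_int behaves (hypothetical inputs):
#   to_int("16px") -> 16
#   to_int("2em")  -> 24   (em values are assumed to be 12px each)
#   to_int("42")   -> 42
#   to_int(None)   -> None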


def clean(text):
    # Many spaces make the following regexes run forever
    # Collapse runs of 255 or more whitespace characters down to 255 spaces
    text = re.sub(r"\s{255,}", " " * 255, text)
    # Strip whitespace around newlines
    text = re.sub(r"\s*\n\s*", "\n", text)
    # Replace tabs with spaces, and collapse runs of two or more spaces/tabs into one space
    text = re.sub(r"\t|[ \t]{2,}", " ", text)
    return text.strip()

# Return the length of the cleaned text content
def text_length(i):
    return len(clean(i.text_content() or ""))
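
# A quick sketch of clean() on a hypothetical input:
#   clean("  hello   world \n next  line ") -> "hello world\nnext line"
# text_length(elem) is then simply the length of this cleaned text.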

# Build a compiled regex pattern that matches the given elements
def compile_pattern(elements):
    if not elements:
        return None
    elif isinstance(elements, pattern_type):
        # If the input is already a compiled pattern, return it as-is
        return elements
    elif isinstance(elements, (str_, bytes_)):
        # If the input is a byte string or a string,
        # decode bytes to str first for the next step
        if isinstance(elements, bytes_):
            elements = str_(elements, "utf-8")
        # then split the string on commas for the next step
        elements = elements.split(u",")
    # If the input is a list or tuple,
    # join the items with `|` to build the pattern string
    if isinstance(elements, (list, tuple)):
        return re.compile(u"|".join([re.escape(x.strip()) for x in elements]), re.U)
    else:
        # Otherwise raise an exception
        raise Exception("Unknown type for the pattern: {}".format(type(elements)))
        # assume string or string like object
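
# A quick sketch of compile_pattern (hypothetical inputs):
#   compile_pattern("sidebar, ads").pattern   -> "sidebar|ads"
#   compile_pattern(["foo", "bar"]).pattern   -> "foo|bar"
#   compile_pattern(re.compile("news|block")) -> the same pattern object, unchanged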


class Document:
    """Class to build a etree document out of html."""

    def __init__(
        self,
        input,
        positive_keywords=None,
        negative_keywords=None,
        url=None,
        min_text_length=25,
        retry_length=250,
        xpath=False,
        handle_failures="discard",
    ):
        """Generate the document

        :param input: string of the html content.
        :param positive_keywords: regex, list or comma-separated string of patterns in classes and ids
        :param negative_keywords: regex, list or comma-separated string in classes and ids
        :param min_text_length: Tunable. Set to a higher value for more precise detection of longer texts.
        :param retry_length: Tunable. Set to a lower value for better detection of very small texts.
        :param xpath: If set to True, adds x="..." attribute to each HTML node,
        containing xpath path pointing to original document path (allows to
        reconstruct selected summary in original document).
        :param handle_failures: Parameter passed to `lxml` for handling failure during exception.
        Support options = ["discard", "ignore", None]

        Examples:
            positive_keywords=["news-item", "block"]
            positive_keywords=["news-item, block"]
            positive_keywords=re.compile("news|block")
            negative_keywords=["mysidebar", "related", "ads"]

        The Document class is not re-enterable.
        It is designed to create a new Document() for each HTML file to process it.

        API methods:
        .title() -- full title
        .short_title() -- cleaned up title
        .content() -- full content
        .summary() -- cleaned up content
        """
        # Assign the arguments to attributes.
        # The HTML text or node to process
        self.input = input
        # The parsed document node (unclear why it is not called `doc`)
        self.html = None
        # Document encoding
        self.encoding = None
        # User-defined positive and negative class names
        self.positive_keywords = compile_pattern(positive_keywords)
        self.negative_keywords = compile_pattern(negative_keywords)
        # URL, used to make relative links absolute
        self.url = url
        # Minimum text length; decides whether a node is discarded
        self.min_text_length = min_text_length
        # Retry length; if the result is shorter than this, extraction is retried
        self.retry_length = retry_length
        self.xpath = xpath
        self.handle_failures = handle_failures

    def _html(self, force=False):
        # If forced, or the `html` attribute is still empty
        if force or self.html is None:
            # Parse `input` into a document tree and store it in `html`
            self.html = self._parse(self.input)
            if self.xpath:
                # If XPath caching is enabled
                root = self.html.getroottree()
                # For every node in the tree,
                # set its `x` attribute to its XPath
                for i in self.html.getiterator():
                    # print root.getpath(i)
                    i.attrib["x"] = root.getpath(i)
        return self.html

    # Parse the input into a document tree
    def _parse(self, input):
        # If the input is already a document tree,
        # use it as-is and set the encoding to the default
        if isinstance(input, (_ElementTree, HtmlElement)):
            doc = input
            self.encoding = 'utf-8'
        else:
            # Otherwise parse the input into a document tree
            doc, self.encoding = build_doc(input)
        # Run the cleaner over the document tree
        doc = html_cleaner.clean_html(doc)
        # If the document URL is known
        base_href = self.url
        if base_href:
            # trying to guard against bad links like <a href="http://[http://...">
            try:
                # such support is added in lxml 3.3.0
                # Make all links absolute,
                # i.e. compute join(base, link) for each of them
                doc.make_links_absolute(
                    base_href,
                    resolve_base_href=True,
                    handle_failures=self.handle_failures,
                )
            except TypeError:  # make_links_absolute() got an unexpected keyword argument 'handle_failures'
                # then we have lxml < 3.3.0
                # please upgrade to lxml >= 3.3.0 if you're failing here!
                # NOTE: this fallback repeats the identical call, including the
                # handle_failures keyword, so it would raise the same TypeError
                # again; the lxml < 3.3.0 path presumably meant to omit that keyword
                doc.make_links_absolute(
                    base_href,
                    resolve_base_href=True,
                    handle_failures=self.handle_failures,
                )
        else:
            # No base URL: just resolve any <base href> within the document
            doc.resolve_base_href(handle_failures=self.handle_failures)
        return doc

    # Get the document body
    def content(self):
        """Returns document body"""
        return get_body(self._html(True))
    # Get the title
    def title(self):
        """Returns document title"""
        return get_title(self._html(True))
    # Get the author
    def author(self):
        """Returns document author"""
        return get_author(self._html(True))
    # Get the shortened title
    def short_title(self):
        """Returns cleaned up document title"""
        return shorten_title(self._html(True))

    # Serialize the document tree to HTML and strip unwanted attributes
    def get_clean_html(self):
        """
        An internal method, which can be overridden in subclasses, for example,
        to disable or to improve DOM-to-text conversion in .summary() method
        """
        return clean_attributes(tounicode(self.html, method="html"))

    # Extract the article (the main content of the page)
    def summary(self, html_partial=False):
        """
        Given a HTML file, extracts the text of the article.

        :param html_partial: return only the div of the document, don't wrap
                             in html and body tags.

        Warning: It mutates internal DOM representation of the HTML document,
        so it is better to call other API methods before this one.
        """
        try:
            ruthless = True
            while True:
                # Parse the HTML
                self._html(True)
                # Remove all <script> and <style> elements
                for i in self.tags(self.html, "script", "style"):
                    i.drop_tree()
                # Give <body> an ID
                for i in self.tags(self.html, "body"):
                    i.set("id", "readabilityBody")
                # Remove nodes whose class/id names mark them as unlikely candidates
                if ruthless:
                    self.remove_unlikely_candidates()
                # Convert misused <div>s into <p>s
                self.transform_misused_divs_into_paragraphs()
                # Score the paragraphs to build the candidate set
                candidates = self.score_paragraphs()
                # Pick the best candidate by content score
                best_candidate = self.select_best_candidate(candidates)
                if best_candidate:
                    # If there is a best candidate, use its content as the article
                    article = self.get_article(
                        candidates, best_candidate, html_partial=html_partial
                    )
                else:
                    # Otherwise retry without removing the unlikely candidates
                    if ruthless:
                        log.info("ruthless removal did not work. ")
                        ruthless = False
                        log.debug(
                            (
                                "ended up stripping too much - "
                                "going for a safer _parse"
                            )
                        )
                        # try again
                        continue
                    else:
                        # If we already retried, fall back to the <body> content as the article
                        log.debug(
                            (
                                "Ruthless and lenient parsing did not work. "
                                "Returning raw html"
                            )
                        )
                        article = self.html.find("body")
                        # If <body> cannot be found either, use the whole document as the article
                        if article is None:
                            article = self.html
                # Sanitize the article
                cleaned_article = self.sanitize(article, candidates)

                # Measure the article length
                article_length = len(cleaned_article or "")
                retry_length = self.retry_length
                # If the article is too short and unlikely candidates were removed, retry
                of_acceptable_length = article_length >= retry_length
                if ruthless and not of_acceptable_length:
                    ruthless = False
                    # Loop through and try again.
                    continue
                else:
                    # Otherwise return the result
                    return cleaned_article
        except Exception as e:
            log.exception("error getting summary: ")
            if sys.version_info[0] == 2:
                from .compat.two import raise_with_traceback
            else:
                from .compat.three import raise_with_traceback
            raise_with_traceback(Unparseable, sys.exc_info()[2], str_(e))

    # Look through the best candidate's siblings for content we may have missed
    def get_article(self, candidates, best_candidate, html_partial=False):
        # Now that we have the top candidate, look through its siblings for
        # content that might also be related.
        # Things like preambles, content split by ads that we removed, etc.
        sibling_score_threshold = max([10, best_candidate["content_score"] * 0.2])
        # create a new html document with a html->body->div
        # Create a <div> container to hold the resulting content
        if html_partial:
            output = fragment_fromstring("<div/>")
        else:
            output = document_fromstring("<div/>")
        # Collect all siblings of the best candidate
        best_elem = best_candidate["elem"]
        parent = best_elem.getparent()
        siblings = parent.getchildren() if parent is not None else [best_elem]
        # Iterate over the siblings
        for sibling in siblings:
            # in lxml there no concept of simple text
            # if isinstance(sibling, NavigableString): continue
            append = False
            # The best candidate itself is always appended to the result
            if sibling is best_elem:
                append = True
            # Append a sibling if it is in the candidate set
            # and its content score passes the threshold
            sibling_key = sibling  # HashableElement(sibling)
            if (
                sibling_key in candidates
                and candidates[sibling_key]["content_score"] >= sibling_score_threshold
            ):
                append = True
            
            if sibling.tag == "p":
                link_density = self.get_link_density(sibling)
                node_content = sibling.text or ""
                node_length = len(node_content)
                # Append a <p> sibling whose text is longer than 80 chars
                # and whose link density is below 0.25
                if node_length > 80 and link_density < 0.25:
                    append = True
                elif (
                    node_length <= 80
                    and link_density == 0
                    and re.search(r"\.( |$)", node_content)
                ):
                    # Also append if the text is 80 chars or fewer, has no links,
                    # and contains a period followed by a space (or at the end)
                    append = True

            if append:
                # We don't want to append directly to output, but the div
                # in html->body->div
                if html_partial:
                    output.append(sibling)
                else:
                    output.getchildren()[0].getchildren()[0].append(sibling)
        # if output is not None:
        #    output.append(best_elem)
        return output
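
    # A numeric sketch of the sibling threshold above (hypothetical scores):
    # with best_candidate["content_score"] == 40 the threshold is max(10, 40 * 0.2) == 10,
    # so any sibling in the candidate set scoring at least 10 joins the article,
    # along with <p> siblings that pass the length / link-density checks.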

    # Select the best candidate
    def select_best_candidate(self, candidates):
        if not candidates:
            return None

        # Sort the candidates by content score, descending
        sorted_candidates = sorted(
            candidates.values(), key=lambda x: x["content_score"], reverse=True
        )
        # Log the content scores of the top five
        for candidate in sorted_candidates[:5]:
            elem = candidate["elem"]
            log.debug("Top 5 : %6.3f %s" % (candidate["content_score"], describe(elem)))

        # Take the first one as the best candidate
        best_candidate = sorted_candidates[0]
        return best_candidate

    # Compute the link density
    def get_link_density(self, elem):
        link_length = 0
        # Find all <a> descendants
        # and sum up their text lengths
        for i in elem.findall(".//a"):
            link_length += text_length(i)
        # if len(elem.findall(".//div") or elem.findall(".//p")):
        #    link_length = link_length
        # Divide the link text length by the node's total text length
        total_length = text_length(elem)
        return float(link_length) / max(total_length, 1)
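
    # A worked example (hypothetical node): for a <div> whose cleaned text is
    # 200 characters, of which 50 characters sit inside <a> descendants,
    # get_link_density returns 50 / 200 = 0.25.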

    # Build the candidate set and score each node in it:
    # score = (
    #     class_weight + name_weight
    #     + children_comma_count + 1 + min(children_text_len / 100, 3)
    # ) * (1 - link_density)
    def score_paragraphs(self):
        MIN_LEN = self.min_text_length
        candidates = {}
        ordered = []
        # Iterate over every <p>, <pre>, and <td> element
        for elem in self.tags(self._html(), "p", "pre", "td"):
            # Get the parent and grandparent nodes
            parent_node = elem.getparent()
            if parent_node is None:
                continue
            grand_parent_node = parent_node.getparent()

            # Get the inner text with whitespace normalized
            inner_text = clean(elem.text_content() or "")
            inner_text_len = len(inner_text)

            # If this paragraph is less than 25 characters
            # don't even count it.
            # Skip elements whose text is shorter than the minimum length
            if inner_text_len < MIN_LEN:
                continue

            # Add the parent node to the candidate set if it is not there yet
            if parent_node not in candidates:
                candidates[parent_node] = self.score_node(parent_node)
                ordered.append(parent_node)

            # Add the grandparent node to the candidate set if it is not there yet
            if grand_parent_node is not None and grand_parent_node not in candidates:
                candidates[grand_parent_node] = self.score_node(grand_parent_node)
                ordered.append(grand_parent_node)

            # Compute the child's content score: 1, plus the number of
            # comma-separated chunks, plus min(text_length / 100, 3)
            content_score = 1
            content_score += len(inner_text.split(","))
            content_score += min((inner_text_len / 100), 3)
            # if elem not in candidates:
            #    candidates[elem] = self.score_node(elem)

            # WTF? candidates[elem]['content_score'] += content_score
            # Add the child's content score to the parent's, and half of it to the grandparent's
            candidates[parent_node]["content_score"] += content_score
            if grand_parent_node is not None:
                candidates[grand_parent_node]["content_score"] += content_score / 2.0

        # Scale the final candidates score based on link density. Good content
        # should have a relatively small link density (5% or less) and be
        # mostly unaffected by this operation.
        for elem in ordered:
            # Scale each candidate's score by (1 - link_density)
            candidate = candidates[elem]
            ld = self.get_link_density(elem)
            score = candidate["content_score"]
            log.debug(
                "Branch %6.3f %s link density %.3f -> %6.3f"
                % (score, describe(elem), ld, score * (1 - ld))
            )
            candidate["content_score"] *= 1 - ld

        return candidates
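
    # A worked example of the scoring above (hypothetical paragraph): a <p> with
    # 150 characters of text and 2 commas contributes 1 + 3 + min(150 / 100, 3) = 5.5
    # points to its parent and half that (2.75) to its grandparent, before the
    # link-density scaling is applied.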

    # Weight a node by its class name and id
    def class_weight(self, e):
        weight = 0
        # Check both the node's class and its id
        for feature in [e.get("class", None), e.get("id", None)]:
            if feature:
                # Adjust the weight if they match the predefined positive/negative patterns
                if REGEXES["negativeRe"].search(feature):
                    weight -= 25

                if REGEXES["positiveRe"].search(feature):
                    weight += 25
                # Adjust the weight if they match the user-defined positive/negative patterns
                if self.positive_keywords and self.positive_keywords.search(feature):
                    weight += 25

                if self.negative_keywords and self.negative_keywords.search(feature):
                    weight -= 25

        # Adjust the weight if the user-defined patterns match `tag-{e.tag}`
        if self.positive_keywords and self.positive_keywords.match("tag-" + e.tag):
            weight += 25

        if self.negative_keywords and self.negative_keywords.match("tag-" + e.tag):
            weight -= 25

        return weight
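
    # A worked example (hypothetical element): <div class="article sidebar"> matches
    # both positiveRe ("article") and negativeRe ("sidebar"), so its class weight
    # is +25 - 25 = 0 before any user-defined keywords are applied.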

    # Score a node by its tag name:
    # score_node = class_weight + name_weight
    def score_node(self, elem):
        content_score = self.class_weight(elem)
        # Get the tag name
        name = elem.tag.lower()
        if name in ["div", "article"]:
           # 这两个分数加五,因为很可能是正文
           content_score += 5
        elif name in ["pre", "td", "blockquote"]:
            # These may also hold the main content, though few pages do this nowadays
            content_score += 3
        elif name in ["address", "ol", "ul", "dl", "dd", "dt", "li", "form", "aside"]:
            # These are elements inside the content, not the content itself
            content_score -= 3
        elif name in [
            "h1",
            "h2",
            "h3",
            "h4",
            "h5",
            "h6",
            "th",
            "header",
            "footer",
            "nav",
        ]:
            # These are elements inside the content, not the content itself
            content_score -= 5
        return {"content_score": content_score, "elem": elem}

    # Remove unlikely candidates
    def remove_unlikely_candidates(self):
        for elem in self.html.findall(".//*"):
            s = "%s %s" % (elem.get("class", ""), elem.get("id", ""))
            if len(s) < 2:
                continue
            if (
                REGEXES["unlikelyCandidatesRe"].search(s)
                and (not REGEXES["okMaybeItsACandidateRe"].search(s))
                and elem.tag not in ["html", "body"]
            ):
                log.debug("Removing unlikely candidate - %s" % describe(elem))
                elem.drop_tree()
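
    # A quick sketch (hypothetical elements): <div class="comment"> matches
    # unlikelyCandidatesRe and is dropped, while <div class="article comment">
    # also matches okMaybeItsACandidateRe ("article") and is therefore kept.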

    def transform_misused_divs_into_paragraphs(self):
        # Iterate over all <div> elements
        for elem in self.tags(self.html, "div"):
            # transform <div>s that do not contain other block elements into
            # <p>s
            # FIXME: The current implementation ignores all descendants that
            # are not direct children of elem
            # This results in incorrect results in case there is an <img>
            # buried within an <a> for example
            # If the element contains none of the block elements above,
            # convert it into a <p>
            if not REGEXES["divToPElementsRe"].search(
                str_(b"".join(map(tostring_, list(elem))))
            ):
                # log.debug("Altering %s to p" % (describe(elem)))
                elem.tag = "p"
                # print "Fixed element "+describe(elem)

        # For each remaining <div> with leading text, create a <p>,
        # move the text into it, and insert it at the front of the <div>
        for elem in self.tags(self.html, "div"):
            if elem.text and elem.text.strip():
                p = fragment_fromstring("<p/>")
                p.text = elem.text
                elem.text = None
                elem.insert(0, p)
                # print "Appended "+tounicode(p)+" to "+describe(elem)
            # Iterate over the <div>'s children in reverse order
            for pos, child in reversed(list(enumerate(elem))):
                # Check the text between this child and the next one (its tail)
                if child.tail and child.tail.strip():
                    # If present, wrap it in a <p> and insert it back at the original position
                    p = fragment_fromstring("<p/>")
                    p.text = child.tail
                    child.tail = None
                    elem.insert(pos + 1, p)
                    # print "Inserted "+tounicode(p)+" to "+describe(elem)
                # Remove all <br> elements
                if child.tag == "br":
                    # print 'Dropped <br> at '+describe(elem)
                    child.drop_tree()
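
    # A before/after sketch of the second pass (hypothetical markup):
    #   <div>intro<p>body</p>tail<br/></div>
    # keeps its <div> tag (it contains a <p>), but the leading "intro" and the
    # tail text "tail" are each wrapped in their own <p>, and the <br/> is
    # dropped, yielding <div><p>intro</p><p>body</p><p>tail</p></div>.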

    # Yield the descendants of `node` with the given tag names
    def tags(self, node, *tag_names):
        for tag_name in tag_names:
            for e in node.findall(".//%s" % tag_name):
                yield e
    
    # Same as above, but in reverse order
    def reverse_tags(self, node, *tag_names):
        for tag_name in tag_names:
            for e in reversed(node.findall(".//%s" % tag_name)):
                yield e

    # Sanitize the article
    def sanitize(self, node, candidates):
        MIN_LEN = self.min_text_length
        for header in self.tags(node, "h1", "h2", "h3", "h4", "h5", "h6"):
            if self.class_weight(header) < 0 or self.get_link_density(header) > 0.33:
                header.drop_tree()

        for elem in self.tags(node, "form", "textarea"):
            elem.drop_tree()

        for elem in self.tags(node, "iframe"):
            if "src" in elem.attrib and REGEXES["videoRe"].search(elem.attrib["src"]):
                elem.text = "VIDEO"  # ADD content to iframe text node to force <iframe></iframe> proper output
            else:
                elem.drop_tree()

        allowed = {}
        # Conditionally clean <table>s, <ul>s, and <div>s
        for el in self.reverse_tags(
            node, "table", "ul", "div", "aside", "header", "footer", "section"
        ):
            if el in allowed:
                continue
            weight = self.class_weight(el)
            if el in candidates:
                content_score = candidates[el]["content_score"]
                # print '!',el, '-> %6.3f' % content_score
            else:
                content_score = 0
            tag = el.tag

            if weight + content_score < 0:
                log.debug(
                    "Removed %s with score %6.3f and weight %-3s"
                    % (describe(el), content_score, weight,)
                )
                el.drop_tree()
            elif el.text_content().count(",") < 10:
                counts = {}
                for kind in ["p", "img", "li", "a", "embed", "input"]:
                    counts[kind] = len(el.findall(".//%s" % kind))
                counts["li"] -= 100
                counts["input"] -= len(el.findall('.//input[@type="hidden"]'))

                # Count the text length excluding any surrounding whitespace
                content_length = text_length(el)
                link_density = self.get_link_density(el)
                parent_node = el.getparent()
                if parent_node is not None:
                    if parent_node in candidates:
                        content_score = candidates[parent_node]["content_score"]
                    else:
                        content_score = 0
                # if parent_node is not None:
                # pweight = self.class_weight(parent_node) + content_score
                # pname = describe(parent_node)
                # else:
                # pweight = 0
                # pname = "no parent"
                to_remove = False
                reason = ""

                # if el.tag == 'div' and counts["img"] >= 1:
                #    continue
                if counts["p"] and counts["img"] > 1 + counts["p"] * 1.3:
                    reason = "too many images (%s)" % counts["img"]
                    to_remove = True
                elif counts["li"] > counts["p"] and tag not in ("ol", "ul"):
                    reason = "more <li>s than <p>s"
                    to_remove = True
                elif counts["input"] > (counts["p"] / 3):
                    reason = "less than 3x <p>s than <input>s"
                    to_remove = True
                elif content_length < MIN_LEN and counts["img"] == 0:
                    reason = (
                        "too short content length %s without a single image"
                        % content_length
                    )
                    to_remove = True
                elif content_length < MIN_LEN and counts["img"] > 2:
                    reason = (
                        "too short content length %s and too many images"
                        % content_length
                    )
                    to_remove = True
                elif weight < 25 and link_density > 0.2:
                    reason = "too many links %.3f for its weight %s" % (
                        link_density,
                        weight,
                    )
                    to_remove = True
                elif weight >= 25 and link_density > 0.5:
                    reason = "too many links %.3f for its weight %s" % (
                        link_density,
                        weight,
                    )
                    to_remove = True
                elif (counts["embed"] == 1 and content_length < 75) or counts[
                    "embed"
                ] > 1:
                    reason = (
                        "<embed>s with too short content length, or too many <embed>s"
                    )
                    to_remove = True
                elif not content_length:
                    reason = "no content"
                    to_remove = True
                    #                if el.tag == 'div' and counts['img'] >= 1 and to_remove:
                    #                    imgs = el.findall('.//img')
                    #                    valid_img = False
                    #                    log.debug(tounicode(el))
                    #                    for img in imgs:
                    #
                    #                        height = img.get('height')
                    #                        text_length = img.get('text_length')
                    #                        log.debug ("height %s text_length %s" %(repr(height), repr(text_length)))
                    #                        if to_int(height) >= 100 or to_int(text_length) >= 100:
                    #                            valid_img = True
                    #                            log.debug("valid image" + tounicode(img))
                    #                            break
                    #                    if valid_img:
                    #                        to_remove = False
                    #                        log.debug("Allowing %s" %el.text_content())
                    #                        for desnode in self.tags(el, "table", "ul", "div"):
                    #                            allowed[desnode] = True

                    # find x non empty preceding and succeeding siblings
                    i, j = 0, 0
                    x = 1
                    siblings = []
                    for sib in el.itersiblings():
                        # log.debug(sib.text_content())
                        sib_content_length = text_length(sib)
                        if sib_content_length:
                            i = +1  # NOTE: likely a typo for `i += 1` in the original source; with x == 1 the behavior happens to match
                            siblings.append(sib_content_length)
                            if i == x:
                                break
                    for sib in el.itersiblings(preceding=True):
                        # log.debug(sib.text_content())
                        sib_content_length = text_length(sib)
                        if sib_content_length:
                            j = +1  # NOTE: likely a typo for `j += 1` in the original source; with x == 1 the behavior happens to match
                            siblings.append(sib_content_length)
                            if j == x:
                                break
                    # log.debug(str_(siblings))
                    if siblings and sum(siblings) > 1000:
                        to_remove = False
                        log.debug("Allowing %s" % describe(el))
                        for desnode in self.tags(el, "table", "ul", "div", "section"):
                            allowed[desnode] = True

                if to_remove:
                    log.debug(
                        "Removed %6.3f %s with weight %s cause it has %s."
                        % (content_score, describe(el), weight, reason)
                    )
                    # print tounicode(el)
                    # log.debug("pname %s pweight %.3f" %(pname, pweight))
                    el.drop_tree()
                else:
                    log.debug(
                        "Not removing %s of length %s: %s"
                        % (describe(el), content_length, text_content(el))
                    )

        self.html = node
        return self.get_clean_html()
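
# A minimal usage sketch of the Document API (hypothetical HTML string):
#   doc = Document("<html><body><div><p>Some long article text...</p></div></body></html>")
#   print(doc.short_title())
#   print(doc.summary(html_partial=True))
# Note that summary() mutates the internal DOM, so call title()/short_title()
# before it, as the docstring above warns.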


def main():
    VERBOSITY = {1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}

    from optparse import OptionParser

    parser = OptionParser(usage="%prog: [options] [file]")
    parser.add_option("-v", "--verbose", action="count", default=0)
    parser.add_option(
        "-b", "--browser", default=None, action="store_true", help="open in browser"
    )
    parser.add_option(
        "-l", "--log", default=None, help="save logs into file (appended)"
    )
    parser.add_option(
        "-u", "--url", default=None, help="use URL instead of a local file"
    )
    parser.add_option("-x", "--xpath", default=None, help="add original xpath")
    parser.add_option(
        "-p",
        "--positive-keywords",
        default=None,
        help="positive keywords (comma-separated)",
        action="store",
    )
    parser.add_option(
        "-n",
        "--negative-keywords",
        default=None,
        help="negative keywords (comma-separated)",
        action="store",
    )
    (options, args) = parser.parse_args()

    if options.verbose:
        logging.basicConfig(
            level=VERBOSITY[options.verbose],
            filename=options.log,
            format="%(asctime)s: %(levelname)s: %(message)s (at %(filename)s: %(lineno)d)",
        )

    if not (len(args) == 1 or options.url):
        parser.print_help()
        sys.exit(1)

    file = None
    if options.url:
        headers = {"User-Agent": "Mozilla/5.0"}
        if sys.version_info[0] == 3:
            import urllib.request, urllib.parse, urllib.error

            request = urllib.request.Request(options.url, None, headers)
            file = urllib.request.urlopen(request)
        else:
            import urllib2

            request = urllib2.Request(options.url, None, headers)
            file = urllib2.urlopen(request)
    else:
        file = open(args[0], "rt")
    try:
        doc = Document(
            file.read(),
            url=options.url,
            positive_keywords=options.positive_keywords,
            negative_keywords=options.negative_keywords,
        )
        if options.browser:
            from .browser import open_in_browser

            result = "<h2>" + doc.short_title() + "</h2><br/>" + doc.summary()
            open_in_browser(result)
        else:
            enc = (
                sys.__stdout__.encoding or "utf-8"
            )  # XXX: this hack could not always work, better to set PYTHONIOENCODING
            result = "Title:" + doc.short_title() + "\n" + doc.summary()
            if sys.version_info[0] == 3:
                print(result)
            else:
                print(result.encode(enc, "replace"))
    finally:
        file.close()


if __name__ == "__main__":
    main()