#!/usr/bin/python
# -*- coding: utf-8 -*-
import typing

import bs4


def xpath_soup(element):
    # type: (typing.Union[bs4.element.Tag, bs4.element.NavigableString]) -> str
    """
    Generate xpath from BeautifulSoup4 element.

    :param element: BeautifulSoup4 element.
    :type element: bs4.element.Tag or bs4.element.NavigableString
    :return: xpath as string
    :rtype: str

    Usage
    -----
    >>> import bs4
    >>> html = (
    ...     '<html><head><title>title</title></head>'
    ...     '<body><p>p <i>1</i></p><p>p <i>2</i></p></body></html>'
    ...     )
    >>> soup = bs4.BeautifulSoup(html, 'html.parser')
    >>> xpath_soup(soup.html.body.p.i)
    '/html/body/p[1]/i'

    >>> import bs4
    >>> xml = (
    ...     '<?xml version="1.0" encoding="UTF-8"?>'
    ...     '<doc xmlns:ns1="http://localhost/ns1"'
    ...     ' xmlns:ns2="http://localhost/ns2">'
    ...     '<ns1:elm/><ns2:elm/><ns2:elm/></doc>'
    ...     )
    >>> soup = bs4.BeautifulSoup(xml, 'lxml-xml')
    >>> xpath_soup(soup.doc.find('ns2:elm').next_sibling)
    '/doc/ns2:elm[2]'
    """
    components = []
    # NavigableStrings have no name of their own, so start from the parent tag.
    target = element if element.name else element.parent
    # Walk from the top-most real element down to the target, skipping the
    # BeautifulSoup document object itself.
    for node in (target, *target.parents)[-2::-1]:  # type: bs4.element.Tag
        tag = '%s:%s' % (node.prefix, node.name) if node.prefix else node.name
        siblings = node.parent.find_all(tag, recursive=False)
        # Add a positional index only when the tag is not unique among siblings.
        components.append(tag if len(siblings) == 1 else '%s[%d]' % (tag, next(
            index
            for index, sibling in enumerate(siblings, 1)
            if sibling is node
        )))
    return '/%s' % '/'.join(components)


if __name__ == '__main__':
    import doctest
    doctest.testmod(verbose=True, raise_on_error=True)
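
Not part of the gist itself, but a quick way to sanity-check the generated paths (a minimal sketch, assuming lxml is installed alongside bs4) is to feed the result of xpath_soup back into lxml and confirm it resolves to the same element:

import bs4
import lxml.html

html = (
    '<html><head><title>title</title></head>'
    '<body><p>p <i>1</i></p><p>p <i>2</i></p></body></html>'
)
soup = bs4.BeautifulSoup(html, 'html.parser')
xpath = xpath_soup(soup.html.body.p.i)  # '/html/body/p[1]/i'

# Resolve the generated absolute path with lxml; it should point at the same tag.
tree = lxml.html.fromstring(html)
matches = tree.xpath(xpath)
assert len(matches) == 1 and matches[0].tag == 'i'

The same absolute path can also be handed to other XPath consumers, for example Selenium's find_element(By.XPATH, ...).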
Amazing! Thank you so much!
Thanks a bunch! Do you have a license for the code?
You are my hero.
I'm going to start removing every single Codility-related comment here. This is not the right place for cheaters/lazy people to share that kind of stuff.
Wow it is awesome, you are a genius. Thanks!
nice job 👍
Hi @ergoithz,
I ran into a small problem: if the XML element has a namespace prefix, the xpath returned by this function does not include the prefix. It looks like this:
<?xml version="1.0" encoding="UTF-8"?>
<xfa:datasets xmlns:xfa="http://www.xfa.org/schema/xfa-data/1.0/">
<data>ABC</data>
</xfa:datasets>
The output is /datasets/data, but I would prefer it to keep the prefix: /xfa:datasets/data.
So I modified the code so that it keeps the XML namespace prefix. I hope you have some time to review it.
def xpath_soup(element, keep_prefix: bool = True):
    components = []
    child = element if element.name else element.parent
    for parent in child.parents:  # type: bs4.element.Tag
        siblings = parent.find_all(child.name, recursive=False)
        child_name = f'{child.prefix}:{child.name}' if child.prefix and keep_prefix else child.name
        components.append(
            child_name if 1 == len(siblings) else '%s[%d]' % (
                child_name,
                next(i for i, s in enumerate(siblings, 1) if s is child)
            )
        )
        child = parent
    components.reverse()
    return '/%s' % '/'.join(components)
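
For illustration, a minimal sketch exercising the keep_prefix flag on the xfa document above (assuming the modified function is defined as pasted and bs4 can use the lxml-xml parser):

import bs4

xml = (
    '<?xml version="1.0" encoding="UTF-8"?>'
    '<xfa:datasets xmlns:xfa="http://www.xfa.org/schema/xfa-data/1.0/">'
    '<data>ABC</data>'
    '</xfa:datasets>'
)
soup = bs4.BeautifulSoup(xml, 'lxml-xml')

# With the default keep_prefix=True the namespace prefix is preserved.
print(xpath_soup(soup.find('data')))                     # -> /xfa:datasets/data
# With keep_prefix=False the original (unprefixed) behaviour is kept.
print(xpath_soup(soup.find('data'), keep_prefix=False))  # -> /datasets/data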
@funway thanks for the heads up, snippet updated with your suggestion.
(You might want to take a look at your own code, though: your sibling search wasn't prefixed.)
Sorry, but I do not have solutions for random, poorly-reported errors triggered by third-party libraries well outside this snippet's execution; this is probably caused by a lack of understanding of how XPath works and of what this snippet really does.
Things to try: