# urllibcompat.py - adapters to ease using urllib2 on Py2 and urllib on Py3
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import http.server
import urllib.error
import urllib.parse
import urllib.request
import urllib.response

from .pycompat import getattr
from . import pycompat

_sysstr = pycompat.sysstr


class _pycompatstub:
    def __init__(self):
        self._aliases = {}

    def _registeraliases(self, origin, items):
        """Add items that will be populated at the first access"""
        items = map(_sysstr, items)
        self._aliases.update(
            (item.replace('_', '').lower(), (origin, item)) for item in items
        )

    def _registeralias(self, origin, attr, name):
        """Alias ``origin``.``attr`` as ``name``"""
        self._aliases[_sysstr(name)] = (origin, _sysstr(attr))

    def __getattr__(self, name):
        try:
            origin, item = self._aliases[name]
        except KeyError:
            raise AttributeError(name)
        self.__dict__[name] = obj = getattr(origin, item)
        return obj
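

# Illustrative sketch of the lazy aliasing above (hypothetical "stub" name,
# not executed here): for stub = _pycompatstub(), calling
# stub._registeraliases(urllib.parse, (b"urlunparse",)) records the alias
# "urlunparse" as (urllib.parse, "urlunparse"); the first read of
# stub.urlunparse resolves the target through __getattr__ and caches it in
# stub.__dict__, so later reads are ordinary attribute lookups.
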
httpserver = _pycompatstub()
urlreq = _pycompatstub()
urlerr = _pycompatstub()

urlreq._registeraliases(
    urllib.parse,
    (
        b"splitattr",
        b"splitpasswd",
        b"splitport",
        b"splituser",
        b"urlparse",
        b"urlunparse",
    ),
)
urlreq._registeralias(urllib.parse, b"parse_qs", b"parseqs")
urlreq._registeralias(urllib.parse, b"parse_qsl", b"parseqsl")
urlreq._registeralias(urllib.parse, b"unquote_to_bytes", b"unquote")

urlreq._registeraliases(
    urllib.request,
    (
        b"AbstractHTTPHandler",
        b"BaseHandler",
        b"build_opener",
        b"FileHandler",
        b"FTPHandler",
        b"ftpwrapper",
        b"HTTPHandler",
        b"HTTPSHandler",
        b"install_opener",
        b"pathname2url",
        b"HTTPBasicAuthHandler",
        b"HTTPDigestAuthHandler",
        b"HTTPPasswordMgrWithDefaultRealm",
        b"ProxyHandler",
        b"Request",
        b"url2pathname",
        b"urlopen",
    ),
)

urlreq._registeraliases(
    urllib.response,
    (
        b"addclosehook",
        b"addinfourl",
    ),
)

urlerr._registeraliases(
    urllib.error,
    (
        b"HTTPError",
        b"URLError",
    ),
)

httpserver._registeraliases(
    http.server,
    (
        b"HTTPServer",
        b"BaseHTTPRequestHandler",
        b"SimpleHTTPRequestHandler",
        b"CGIHTTPRequestHandler",
    ),
)
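
# Illustrative sketch of how the registered aliases resolve (not executed
# here): urlreq.urlopen is urllib.request.urlopen, urlreq.parseqs is
# urllib.parse.parse_qs, urlerr.httperror is urllib.error.HTTPError, and
# httpserver.httpserver is http.server.HTTPServer; each target is looked up
# lazily on first access and then cached on the stub instance.
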
# urllib.parse.quote() accepts both str and bytes, decodes bytes
# (if necessary), and returns str. This is wonky. We provide a custom
# implementation that only accepts bytes and emits bytes.
def quote(s, safe='/'):
    # bytestr has an __iter__ that emits characters. quote_from_bytes()
    # does an iteration and expects ints. We coerce to bytes to appease it.
    if isinstance(s, pycompat.bytestr):
        s = bytes(s)
    s = urllib.parse.quote_from_bytes(s, safe=safe)
    return s.encode('ascii', 'strict')
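

# Minimal usage sketch for quote() above (illustrative, not executed here):
#   quote(b'foo bar')        -> b'foo%20bar'
#   quote(b'a/b c', safe='') -> b'a%2Fb%20c'
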
# urllib.parse.urlencode() returns str. We use this function to make
# sure we return bytes.
def urlencode(query, doseq=False):
    s = urllib.parse.urlencode(query, doseq=doseq)
    return s.encode('ascii')
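

# Minimal usage sketch for urlencode() above (illustrative, not executed
# here, default doseq=False):
#   urlencode({b'cmd': b'heads'}) -> b'cmd=heads'
#   urlencode({b'q': b'a b'})     -> b'q=a+b'   (spaces become '+')
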
urlreq.quote = quote
urlreq.urlencode = urlencode


def getfullurl(req):
    return req.full_url


def gethost(req):
    return req.host


def getselector(req):
    return req.selector


def getdata(req):
    return req.data


def hasdata(req):
    return req.data is not None
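

# Illustrative sketch of the accessors above (hypothetical request, not
# executed here): for req = urlreq.request('http://example.com/repo?cmd=heads'),
# getfullurl(req) is 'http://example.com/repo?cmd=heads', gethost(req) is
# 'example.com', getselector(req) is '/repo?cmd=heads', and hasdata(req) is
# False until data is attached to the request.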