|
|
# RhodeCode VCSServer provides access to different vcs backends via network.
|
|
|
# Copyright (C) 2014-2023 RhodeCode GmbH
|
|
|
#
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
# the Free Software Foundation; either version 3 of the License, or
|
|
|
# (at your option) any later version.
|
|
|
#
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
# GNU General Public License for more details.
|
|
|
#
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
# along with this program; if not, write to the Free Software Foundation,
|
|
|
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
import binascii
|
|
|
import io
|
|
|
import logging
|
|
|
import stat
|
|
|
import sys
|
|
|
import urllib.request
|
|
|
import urllib.parse
|
|
|
import hashlib
|
|
|
|
|
|
from hgext import largefiles, rebase, purge
|
|
|
|
|
|
from mercurial import commands
|
|
|
from mercurial import unionrepo
|
|
|
from mercurial import verify
|
|
|
from mercurial import repair
|
|
|
from mercurial.error import AmbiguousPrefixLookupError
|
|
|
|
|
|
import vcsserver
|
|
|
from vcsserver import exceptions
|
|
|
from vcsserver.base import (
|
|
|
RepoFactory,
|
|
|
obfuscate_qs,
|
|
|
raise_from_original,
|
|
|
store_archive_in_cache,
|
|
|
ArchiveNode,
|
|
|
BytesEnvelope,
|
|
|
BinaryEnvelope,
|
|
|
)
|
|
|
from vcsserver.hgcompat import (
|
|
|
archival,
|
|
|
bin,
|
|
|
clone,
|
|
|
config as hgconfig,
|
|
|
diffopts,
|
|
|
hex,
|
|
|
get_ctx,
|
|
|
hg_url as url_parser,
|
|
|
httpbasicauthhandler,
|
|
|
httpdigestauthhandler,
|
|
|
makepeer,
|
|
|
instance,
|
|
|
match,
|
|
|
memctx,
|
|
|
exchange,
|
|
|
memfilectx,
|
|
|
nullrev,
|
|
|
hg_merge,
|
|
|
patch,
|
|
|
peer,
|
|
|
revrange,
|
|
|
ui,
|
|
|
hg_tag,
|
|
|
Abort,
|
|
|
LookupError,
|
|
|
RepoError,
|
|
|
RepoLookupError,
|
|
|
InterventionRequired,
|
|
|
RequirementError,
|
|
|
alwaysmatcher,
|
|
|
patternmatcher,
|
|
|
hgutil,
|
|
|
hgext_strip,
|
|
|
)
|
|
|
from vcsserver.str_utils import ascii_bytes, ascii_str, safe_str, safe_bytes
|
|
|
from vcsserver.vcs_base import RemoteBase
|
|
|
from vcsserver.config import hooks as hooks_config
|
|
|
from vcsserver.lib.exc_tracking import format_exc
|
|
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
def make_ui_from_config(repo_config):
    """Create a Mercurial ``ui`` object pre-populated from *repo_config*.

    *repo_config* is an iterable of ``(section, option, value)`` string
    triples.  The returned ui routes all Mercurial output channels to this
    module's logger, is forced quiet and non-paginated, uses a single worker
    thread, and disables the largefiles extension unless it was explicitly
    configured.
    """

    class LoggingUI(ui.ui):
        # Route every ui output channel into the python `logging` module
        # instead of letting Mercurial print to stdout/stderr.

        def status(self, *msg, **opts):
            log.info(' '.join(map(safe_str, msg)).rstrip('\n'))
            #super(LoggingUI, self).status(*msg, **opts)

        def warn(self, *msg, **opts):
            log.warning('ui_logger:' + ' '.join(map(safe_str, msg)).rstrip('\n'))
            #super(LoggingUI, self).warn(*msg, **opts)

        def error(self, *msg, **opts):
            log.error('ui_logger:' + ' '.join(map(safe_str, msg)).rstrip('\n'))
            #super(LoggingUI, self).error(*msg, **opts)

        def note(self, *msg, **opts):
            log.info('ui_logger:' + ' '.join(map(safe_str, msg)).rstrip('\n'))
            #super(LoggingUI, self).note(*msg, **opts)

        def debug(self, *msg, **opts):
            log.debug('ui_logger:' + ' '.join(map(safe_str, msg)).rstrip('\n'))
            #super(LoggingUI, self).debug(*msg, **opts)

    baseui = LoggingUI()

    # start from a clean slate: drop any overlay/user/trusted config layers
    for cfg_attr in ('_ocfg', '_ucfg', '_tcfg'):
        setattr(baseui, cfg_attr, hgconfig.config())

    for section, option, value in repo_config:
        baseui.setconfig(ascii_bytes(section), ascii_bytes(option), ascii_bytes(value))

    # make our hgweb quiet so it doesn't print output
    baseui.setconfig(b'ui', b'quiet', b'true')
    baseui.setconfig(b'ui', b'paginate', b'never')
    # for better Error reporting of Mercurial
    baseui.setconfig(b'ui', b'message-output', b'stderr')

    # force mercurial to only use 1 thread, otherwise it may try to set a
    # signal in a non-main thread, thus generating a ValueError.
    baseui.setconfig(b'worker', b'numcpus', 1)

    # If there is no config for the largefiles extension, we explicitly disable
    # it here. This overrides settings from repositories hgrc file. Recent
    # mercurial versions enable largefiles in hgrc on clone from largefile
    # repo.
    if not baseui.hasconfig(b'extensions', b'largefiles'):
        log.debug('Explicitly disable largefiles extension for repo.')
        baseui.setconfig(b'extensions', b'largefiles', b'!')

    return baseui
|
|
|
|
|
|
|
|
|
def reraise_safe_exceptions(func):
    """Decorator for converting mercurial exceptions to something neutral.

    Mercurial-specific exception types are re-raised as the corresponding
    ``vcsserver.exceptions`` types so callers never have to import mercurial.
    Exceptions that already carry a ``_vcs_kind`` marker pass through
    unchanged.
    """
    import functools

    # functools.wraps preserves __name__/__doc__/__module__ of the wrapped
    # remote method; without it every wrapped method reported as 'wrapper',
    # which breaks introspection and log readability.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (Abort, InterventionRequired) as e:
            raise_from_original(exceptions.AbortException(e), e)
        except RepoLookupError as e:
            raise_from_original(exceptions.LookupException(e), e)
        except RequirementError as e:
            raise_from_original(exceptions.RequirementException(e), e)
        except RepoError as e:
            # NOTE: must come after RepoLookupError (its subclass)
            raise_from_original(exceptions.VcsException(e), e)
        except LookupError as e:
            raise_from_original(exceptions.LookupException(e), e)
        except Exception as e:
            if not hasattr(e, '_vcs_kind'):
                log.exception("Unhandled exception in hg remote call")
                raise_from_original(exceptions.UnhandledException(e), e)

            raise
    return wrapper
|
|
|
|
|
|
|
|
|
class MercurialFactory(RepoFactory):
    """RepoFactory that materializes Mercurial repository objects."""

    repo_type = 'hg'

    def _create_config(self, config, hooks=True):
        """Build a ui object from *config* triples.

        When *hooks* is False, RhodeCode's own hook entries are stripped from
        the ``hooks`` section so internal operations don't trigger them.
        """
        if not hooks:
            filtered_hooks = {
                hooks_config.HOOK_REPO_SIZE,
                hooks_config.HOOK_PRE_PULL,
                hooks_config.HOOK_PULL,
                hooks_config.HOOK_PRE_PUSH,
                # TODO: what about PRETXT, this was disabled in pre 5.0.0
                hooks_config.HOOK_PRETX_PUSH,
            }
            config = [
                (section, option, value)
                for section, option, value in config
                if not (section == 'hooks' and option in filtered_hooks)
            ]

        return make_ui_from_config(config)

    def _create_repo(self, wire, create):
        # build a fresh ui from the wire config, then open/create the repo
        ui_obj = self._create_config(wire["config"])
        repo = instance(ui_obj, safe_bytes(wire["path"]), create)
        log.debug('repository created: got HG object: %s', repo)
        return repo

    def repo(self, wire, create=False):
        """
        Get a repository instance for the given path.
        """
        return self._create_repo(wire, create)
|
|
|
|
|
|
|
|
|
def patch_ui_message_output(baseui):
    """Capture all ui output channels into an in-memory buffer.

    The ui is un-quieted so commands actually produce output, and its
    ``status``/``write``/``warn``/``debug`` channels are replaced with a
    single collector. Returns ``(baseui, buffer)`` where *buffer* is a
    ``io.BytesIO`` holding everything written.
    """
    baseui.setconfig(b'ui', b'quiet', b'false')
    buffer = io.BytesIO()

    def _collect(data, **unused_kwargs):
        buffer.write(data)

    for channel in ('status', 'write', 'warn', 'debug'):
        setattr(baseui, channel, _collect)

    return baseui, buffer
|
|
|
|
|
|
|
|
|
def get_obfuscated_url(url_obj):
    """Return *url_obj* rendered as a string with secrets masked.

    The password (if any) is replaced by ``*****`` and sensitive query-string
    parameters are obfuscated. NOTE: mutates *url_obj* in place before
    stringifying it.
    """
    if url_obj.passwd:
        url_obj.passwd = b'*****'
    url_obj.query = obfuscate_qs(url_obj.query)
    return str(url_obj)
|
|
|
|
|
|
|
|
|
def normalize_url_for_hg(url: str):
    """Split a ``proto+scheme://...`` style URL into (url, proto).

    e.g. ``svn+http://host/repo`` -> ``('http://host/repo', 'svn')``;
    a URL with no ``+`` prefix comes back unchanged with proto ``None``.
    """
    proto = None

    scheme_part = url[:url.find('://')]
    if '+' in scheme_part:
        plus_at = url.find('+')
        proto = url[:plus_at]
        url = url[plus_at + 1:]
    return url, proto
|
|
|
|
|
|
|
|
|
class HgRemote(RemoteBase):
|
|
|
|
|
|
    def __init__(self, factory):
        # factory: a MercurialFactory; a fresh repo object is built per call
        self._factory = factory
        # attribute-name -> handler, used by bulk_request() to resolve the
        # pre_load list of commit attributes
        self._bulk_methods = {
            "affected_files": self.ctx_files,
            "author": self.ctx_user,
            "branch": self.ctx_branch,
            "children": self.ctx_children,
            "date": self.ctx_date,
            "message": self.ctx_description,
            "parents": self.ctx_parents,
            "status": self.ctx_status,
            "obsolete": self.ctx_obsolete,
            "phase": self.ctx_phase,
            "hidden": self.ctx_hidden,
            "_file_paths": self.ctx_list,
        }
        # attribute-name -> handler, used by bulk_file_request() to resolve
        # per-file attributes
        self._bulk_file_methods = {
            "size": self.fctx_size,
            "data": self.fctx_node_data,
            "flags": self.fctx_flags,
            "is_binary": self.is_binary,
            "md5": self.md5_hash,
        }
|
|
|
|
|
|
    def _get_ctx(self, repo, ref):
        # thin indirection over hgcompat.get_ctx; kept as a method so tests
        # and subclasses can override context resolution in one place
        return get_ctx(repo, ref)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def discover_hg_version(self):
|
|
|
from mercurial import util
|
|
|
return safe_str(util.version())
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def is_empty(self, wire):
|
|
|
repo = self._factory.repo(wire)
|
|
|
|
|
|
try:
|
|
|
return len(repo) == 0
|
|
|
except Exception:
|
|
|
log.exception("failed to read object_store")
|
|
|
return False
|
|
|
|
|
|
    @reraise_safe_exceptions
    def bookmarks(self, wire):
        """Return ``{bookmark_name: hex_sha}`` for all bookmarks (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        # cache key is derived from the inner function name + arguments
        @region.conditional_cache_on_arguments(condition=cache_on)
        def _bookmarks(_context_uid, _repo_id):
            repo = self._factory.repo(wire)
            return {safe_str(name): ascii_str(hex(sha)) for name, sha in repo._bookmarks.items()}

        return _bookmarks(context_uid, repo_id)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def branches(self, wire, normal, closed):
        """Return ``{branch_name: tip_hex}`` filtered by open/closed state.

        *normal* selects open branches, *closed* selects closed ones; both
        may be True. Result is cached per context.
        """
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _branches(_context_uid, _repo_id, _normal, _closed):
            repo = self._factory.repo(wire)
            iter_branches = repo.branchmap().iterbranches()
            bt = {}
            for branch_name, _heads, tip_node, is_closed in iter_branches:
                if normal and not is_closed:
                    bt[safe_str(branch_name)] = ascii_str(hex(tip_node))
                if closed and is_closed:
                    bt[safe_str(branch_name)] = ascii_str(hex(tip_node))

            return bt

        return _branches(context_uid, repo_id, normal, closed)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def bulk_request(self, wire, commit_id, pre_load):
        """Resolve multiple commit attributes (see ``self._bulk_methods``) at once.

        Returns ``{attr: value}`` for each name in *pre_load*; unknown names
        raise a VcsException. Result is cached on the sorted pre_load list.
        """
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _bulk_request(_repo_id, _commit_id, _pre_load):
            result = {}
            for attr in pre_load:
                try:
                    method = self._bulk_methods[attr]
                    wire.update({'cache': False})  # disable cache for bulk calls so we don't double cache
                    result[attr] = method(wire, commit_id)
                except KeyError as e:
                    # exceptions.X(e) returns a callable exception factory
                    raise exceptions.VcsException(e)(
                        f'Unknown bulk attribute: "{attr}"')
            return result

        return _bulk_request(repo_id, commit_id, sorted(pre_load))
|
|
|
|
|
|
    @reraise_safe_exceptions
    def ctx_branch(self, wire, commit_id):
        """Return the branch name of *commit_id* (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _ctx_branch(_repo_id, _commit_id):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            return ctx.branch()
        return _ctx_branch(repo_id, commit_id)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def ctx_date(self, wire, commit_id):
        """Return the commit date of *commit_id* (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _ctx_date(_repo_id, _commit_id):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            return ctx.date()
        return _ctx_date(repo_id, commit_id)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def ctx_description(self, wire, revision):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
return ctx.description()
|
|
|
|
|
|
    @reraise_safe_exceptions
    def ctx_files(self, wire, commit_id):
        """Return the files touched by *commit_id* (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _ctx_files(_repo_id, _commit_id):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            return ctx.files()

        return _ctx_files(repo_id, commit_id)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def ctx_list(self, path, revision):
|
|
|
repo = self._factory.repo(path)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
return list(ctx)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def ctx_parents(self, wire, commit_id):
        """Return hex ids of visible (non-hidden, non-obsolete) parents (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _ctx_parents(_repo_id, _commit_id):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            return [parent.hex() for parent in ctx.parents()
                    if not (parent.hidden() or parent.obsolete())]

        return _ctx_parents(repo_id, commit_id)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def ctx_children(self, wire, commit_id):
        """Return hex ids of visible (non-hidden, non-obsolete) children (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _ctx_children(_repo_id, _commit_id):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            return [child.hex() for child in ctx.children()
                    if not (child.hidden() or child.obsolete())]

        return _ctx_children(repo_id, commit_id)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def ctx_phase(self, wire, commit_id):
        """Return the phase number of *commit_id* (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _ctx_phase(_context_uid, _repo_id, _commit_id):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            # public=0, draft=1, secret=3
            return ctx.phase()
        return _ctx_phase(context_uid, repo_id, commit_id)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def ctx_obsolete(self, wire, commit_id):
        """Return True when *commit_id* is obsolete (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _ctx_obsolete(_context_uid, _repo_id, _commit_id):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            return ctx.obsolete()
        return _ctx_obsolete(context_uid, repo_id, commit_id)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def ctx_hidden(self, wire, commit_id):
        """Return True when *commit_id* is hidden (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _ctx_hidden(_context_uid, _repo_id, _commit_id):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            return ctx.hidden()
        return _ctx_hidden(context_uid, repo_id, commit_id)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def ctx_substate(self, wire, revision):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
return ctx.substate
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def ctx_status(self, wire, revision):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
status = repo[ctx.p1().node()].status(other=ctx.node())
|
|
|
# object of status (odd, custom named tuple in mercurial) is not
|
|
|
# correctly serializable, we make it a list, as the underling
|
|
|
# API expects this to be a list
|
|
|
return list(status)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def ctx_user(self, wire, revision):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
return ctx.user()
|
|
|
|
|
|
    @reraise_safe_exceptions
    def check_url(self, url, config):
        """Validate that *url* points at a reachable Mercurial repository.

        Step 1: issue a plain HTTP 'between' command request and expect 200.
        Step 2: unless the URL carried an ``svn+`` scheme prefix, open a hg
        peer and resolve ``tip`` to confirm it really is a hg repo.
        Raises ``exceptions.URLError`` on failure; returns True on success.
        Credentials and query strings are obfuscated in all log output.
        """
        url, _proto = normalize_url_for_hg(url)
        url_obj = url_parser(safe_bytes(url))

        test_uri = safe_str(url_obj.authinfo()[0])
        authinfo = url_obj.authinfo()[1]
        obfuscated_uri = get_obfuscated_url(url_obj)
        log.info("Checking URL for remote cloning/import: %s", obfuscated_uri)

        handlers = []
        if authinfo:
            # create a password manager
            passmgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(*authinfo)

            handlers.extend((httpbasicauthhandler(passmgr),
                             httpdigestauthhandler(passmgr)))

        o = urllib.request.build_opener(*handlers)
        o.addheaders = [('Content-Type', 'application/mercurial-0.1'),
                        ('Accept', 'application/mercurial-0.1')]

        # minimal hg wire-protocol probe: 'between' with the null pair
        q = {"cmd": 'between'}
        q.update({'pairs': "{}-{}".format('0' * 40, '0' * 40)})
        qs = f'?{urllib.parse.urlencode(q)}'
        cu = f"{test_uri}{qs}"
        req = urllib.request.Request(cu, None, {})

        try:
            log.debug("Trying to open URL %s", obfuscated_uri)
            resp = o.open(req)
            if resp.code != 200:
                raise exceptions.URLError()('Return Code is not 200')
        except Exception as e:
            log.warning("URL cannot be opened: %s", obfuscated_uri, exc_info=True)
            # means it cannot be cloned
            raise exceptions.URLError(e)(f"[{obfuscated_uri}] org_exc: {e}")

        # now check if it's a proper hg repo, but don't do it for svn
        try:
            if _proto == 'svn':
                pass
            else:
                # check for pure hg repos
                log.debug(
                    "Verifying if URL is a Mercurial repository: %s", obfuscated_uri)
                ui = make_ui_from_config(config)
                peer_checker = makepeer(ui, safe_bytes(url))
                peer_checker.lookup(b'tip')
        except Exception as e:
            log.warning("URL is not a valid Mercurial repository: %s",
                        obfuscated_uri)
            raise exceptions.URLError(e)(
                f"url [{obfuscated_uri}] does not look like an hg repo org_exc: {e}")

        log.info("URL is a valid Mercurial repository: %s", obfuscated_uri)
        return True
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def diff(self, wire, commit_id_1, commit_id_2, file_filter, opt_git, opt_ignorews, context):
|
|
|
repo = self._factory.repo(wire)
|
|
|
|
|
|
if file_filter:
|
|
|
# unpack the file-filter
|
|
|
repo_path, node_path = file_filter
|
|
|
match_filter = match(safe_bytes(repo_path), b'', [safe_bytes(node_path)])
|
|
|
else:
|
|
|
match_filter = file_filter
|
|
|
opts = diffopts(git=opt_git, ignorews=opt_ignorews, context=context, showfunc=1)
|
|
|
|
|
|
try:
|
|
|
diff_iter = patch.diff(
|
|
|
repo, node1=commit_id_1, node2=commit_id_2, match=match_filter, opts=opts)
|
|
|
return BytesEnvelope(b"".join(diff_iter))
|
|
|
except RepoLookupError as e:
|
|
|
raise exceptions.LookupException(e)()
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def node_history(self, wire, revision, path, limit):
|
|
|
cache_on, context_uid, repo_id = self._cache_on(wire)
|
|
|
region = self._region(wire)
|
|
|
|
|
|
@region.conditional_cache_on_arguments(condition=cache_on)
|
|
|
def _node_history(_context_uid, _repo_id, _revision, _path, _limit):
|
|
|
repo = self._factory.repo(wire)
|
|
|
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
fctx = ctx.filectx(safe_bytes(path))
|
|
|
|
|
|
def history_iter():
|
|
|
limit_rev = fctx.rev()
|
|
|
for fctx_candidate in reversed(list(fctx.filelog())):
|
|
|
f_obj = fctx.filectx(fctx_candidate)
|
|
|
|
|
|
# NOTE: This can be problematic...we can hide ONLY history node resulting in empty history
|
|
|
_ctx = obj.changectx()
|
|
|
if _ctx.hidden() or _ctx.obsolete():
|
|
|
continue
|
|
|
|
|
|
if limit_rev >= f_obj.rev():
|
|
|
yield f_obj
|
|
|
|
|
|
history = []
|
|
|
for cnt, obj in enumerate(history_iter()):
|
|
|
if limit and cnt >= limit:
|
|
|
break
|
|
|
history.append(hex(obj.node()))
|
|
|
|
|
|
return [x for x in history]
|
|
|
return _node_history(context_uid, repo_id, revision, path, limit)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def node_history_until(self, wire, revision, path, limit):
|
|
|
cache_on, context_uid, repo_id = self._cache_on(wire)
|
|
|
region = self._region(wire)
|
|
|
|
|
|
@region.conditional_cache_on_arguments(condition=cache_on)
|
|
|
def _node_history_until(_context_uid, _repo_id):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
fctx = ctx.filectx(safe_bytes(path))
|
|
|
|
|
|
file_log = list(fctx.filelog())
|
|
|
if limit:
|
|
|
# Limit to the last n items
|
|
|
file_log = file_log[-limit:]
|
|
|
|
|
|
return [hex(fctx.filectx(cs).node()) for cs in reversed(file_log)]
|
|
|
return _node_history_until(context_uid, repo_id, revision, path, limit)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def bulk_file_request(self, wire, commit_id, path, pre_load):
        """Resolve multiple per-file attributes (see ``_bulk_file_methods``).

        Returns a BinaryEnvelope wrapping ``{attr: value}``; unknown names
        raise a VcsException. Result is cached on the sorted pre_load list.
        """
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _bulk_file_request(_repo_id, _commit_id, _path, _pre_load):
            result = {}
            for attr in pre_load:
                try:
                    method = self._bulk_file_methods[attr]
                    wire.update({'cache': False})  # disable cache for bulk calls so we don't double cache
                    result[attr] = method(wire, _commit_id, _path)
                except KeyError as e:
                    raise exceptions.VcsException(e)(f'Unknown bulk attribute: "{attr}"')
            return result

        return BinaryEnvelope(_bulk_file_request(repo_id, commit_id, path, sorted(pre_load)))
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def fctx_annotate(self, wire, revision, path):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
fctx = ctx.filectx(safe_bytes(path))
|
|
|
|
|
|
result = []
|
|
|
for i, annotate_obj in enumerate(fctx.annotate(), 1):
|
|
|
ln_no = i
|
|
|
sha = hex(annotate_obj.fctx.node())
|
|
|
content = annotate_obj.text
|
|
|
result.append((ln_no, ascii_str(sha), content))
|
|
|
return BinaryEnvelope(result)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def fctx_node_data(self, wire, revision, path):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
fctx = ctx.filectx(safe_bytes(path))
|
|
|
return BytesEnvelope(fctx.data())
|
|
|
|
|
|
    @reraise_safe_exceptions
    def fctx_flags(self, wire, commit_id, path):
        """Return the file flags of *path* at *commit_id* (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _fctx_flags(_repo_id, _commit_id, _path):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            fctx = ctx.filectx(safe_bytes(path))
            return fctx.flags()

        return _fctx_flags(repo_id, commit_id, path)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def fctx_size(self, wire, commit_id, path):
        """Return the size in bytes of *path* at *commit_id* (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _fctx_size(_repo_id, _revision, _path):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, commit_id)
            fctx = ctx.filectx(safe_bytes(path))
            return fctx.size()
        return _fctx_size(repo_id, commit_id, path)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def get_all_commit_ids(self, wire, name):
        """Return hex ids of all visible changesets, oldest first (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _get_all_commit_ids(_context_uid, _repo_id, _name):
            repo = self._factory.repo(wire)
            # use the 'visible' filter so hidden/obsolete changesets are skipped
            revs = [ascii_str(repo[x].hex()) for x in repo.filtered(b'visible').changelog.revs()]
            return revs
        return _get_all_commit_ids(context_uid, repo_id, name)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def get_config_value(self, wire, section, name, untrusted=False):
|
|
|
repo = self._factory.repo(wire)
|
|
|
return repo.ui.config(ascii_bytes(section), ascii_bytes(name), untrusted=untrusted)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def is_large_file(self, wire, commit_id, path):
        """Return True when *path* is a largefiles standin path (cached).

        NOTE: decided purely from the path name via lfutil.isstandin;
        commit_id only participates in the cache key.
        """
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _is_large_file(_context_uid, _repo_id, _commit_id, _path):
            return largefiles.lfutil.isstandin(safe_bytes(path))

        return _is_large_file(context_uid, repo_id, commit_id, path)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def is_binary(self, wire, revision, path):
        """Return True when *path* at *revision* is binary content (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _is_binary(_repo_id, _sha, _path):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, revision)
            fctx = ctx.filectx(safe_bytes(path))
            return fctx.isbinary()

        return _is_binary(repo_id, revision, path)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def md5_hash(self, wire, revision, path):
        """Return the md5 hexdigest of *path*'s content at *revision* (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _md5_hash(_repo_id, _sha, _path):
            repo = self._factory.repo(wire)
            ctx = self._get_ctx(repo, revision)
            fctx = ctx.filectx(safe_bytes(path))
            return hashlib.md5(fctx.data()).hexdigest()

        return _md5_hash(repo_id, revision, path)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def in_largefiles_store(self, wire, sha):
|
|
|
repo = self._factory.repo(wire)
|
|
|
return largefiles.lfutil.instore(repo, sha)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def in_user_cache(self, wire, sha):
|
|
|
repo = self._factory.repo(wire)
|
|
|
return largefiles.lfutil.inusercache(repo.ui, sha)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def store_path(self, wire, sha):
|
|
|
repo = self._factory.repo(wire)
|
|
|
return largefiles.lfutil.storepath(repo, sha)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def link(self, wire, sha, path):
|
|
|
repo = self._factory.repo(wire)
|
|
|
largefiles.lfutil.link(
|
|
|
largefiles.lfutil.usercachepath(repo.ui, sha), path)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def localrepository(self, wire, create=False):
|
|
|
self._factory.repo(wire, create=create)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def lookup(self, wire, revision, both):
        """Resolve *revision* (hash, prefix, or int index) to a full hex id.

        Returns the hex string, or ``(hex, rev_number)`` when *both* is
        truthy. Raises ``exceptions.LookupException`` on unresolvable or
        ambiguous input. Result is cached per context.
        """
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _lookup(_context_uid, _repo_id, _revision, _both):
            repo = self._factory.repo(wire)
            rev = _revision
            if isinstance(rev, int):
                # NOTE(marcink):
                # since Mercurial doesn't support negative indexes properly
                # we need to shift accordingly by one to get proper index, e.g
                # repo[-1] => repo[-2]
                # repo[0] => repo[-1]
                if rev <= 0:
                    rev = rev + -1
            try:
                ctx = self._get_ctx(repo, rev)
            except AmbiguousPrefixLookupError:
                e = RepoLookupError(rev)
                e._org_exc_tb = format_exc(sys.exc_info())
                raise exceptions.LookupException(e)(rev)
            except (TypeError, RepoLookupError, binascii.Error) as e:
                e._org_exc_tb = format_exc(sys.exc_info())
                raise exceptions.LookupException(e)(rev)
            except LookupError as e:
                # mercurial's LookupError carries the offending name
                e._org_exc_tb = format_exc(sys.exc_info())
                raise exceptions.LookupException(e)(e.name)

            if not both:
                return ctx.hex()

            ctx = repo[ctx.hex()]
            return ctx.hex(), ctx.rev()

        return _lookup(context_uid, repo_id, revision, both)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def sync_push(self, wire, url):
|
|
|
if not self.check_url(url, wire['config']):
|
|
|
return
|
|
|
|
|
|
repo = self._factory.repo(wire)
|
|
|
|
|
|
# Disable any prompts for this repo
|
|
|
repo.ui.setconfig(b'ui', b'interactive', b'off', b'-y')
|
|
|
|
|
|
bookmarks = list(dict(repo._bookmarks).keys())
|
|
|
remote = peer(repo, {}, safe_bytes(url))
|
|
|
# Disable any prompts for this remote
|
|
|
remote.ui.setconfig(b'ui', b'interactive', b'off', b'-y')
|
|
|
|
|
|
return exchange.push(
|
|
|
repo, remote, newbranch=True, bookmarks=bookmarks).cgresult
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def revision(self, wire, rev):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, rev)
|
|
|
return ctx.rev()
|
|
|
|
|
|
    @reraise_safe_exceptions
    def rev_range(self, wire, commit_filter):
        """Return hex ids of revisions matching the revrange specs (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _rev_range(_context_uid, _repo_id, _filter):
            repo = self._factory.repo(wire)
            revisions = [
                ascii_str(repo[rev].hex())
                for rev in revrange(repo, list(map(ascii_bytes, commit_filter)))
            ]
            return revisions

        # NOTE: cache key uses the sorted filter; evaluation uses original order
        return _rev_range(context_uid, repo_id, sorted(commit_filter))
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def rev_range_hash(self, wire, node):
|
|
|
repo = self._factory.repo(wire)
|
|
|
|
|
|
def get_revs(repo, rev_opt):
|
|
|
if rev_opt:
|
|
|
revs = revrange(repo, rev_opt)
|
|
|
if len(revs) == 0:
|
|
|
return (nullrev, nullrev)
|
|
|
return max(revs), min(revs)
|
|
|
else:
|
|
|
return len(repo) - 1, 0
|
|
|
|
|
|
stop, start = get_revs(repo, [node + ':'])
|
|
|
revs = [ascii_str(repo[r].hex()) for r in range(start, stop + 1)]
|
|
|
return revs
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def revs_from_revspec(self, wire, rev_spec, *args, **kwargs):
|
|
|
org_path = safe_bytes(wire["path"])
|
|
|
other_path = safe_bytes(kwargs.pop('other_path', ''))
|
|
|
|
|
|
# case when we want to compare two independent repositories
|
|
|
if other_path and other_path != wire["path"]:
|
|
|
baseui = self._factory._create_config(wire["config"])
|
|
|
repo = unionrepo.makeunionrepository(baseui, other_path, org_path)
|
|
|
else:
|
|
|
repo = self._factory.repo(wire)
|
|
|
return list(repo.revs(rev_spec, *args))
|
|
|
|
|
|
    @reraise_safe_exceptions
    def verify(self, wire,):
        """Run repository verification and return the captured text output."""
        repo = self._factory.repo(wire)
        baseui = self._factory._create_config(wire['config'])

        # capture ui output into a buffer instead of stdout
        baseui, output = patch_ui_message_output(baseui)

        repo.ui = baseui
        verify.verify(repo)
        return output.getvalue()
|
|
|
|
|
|
    @reraise_safe_exceptions
    def hg_update_cache(self, wire,):
        """Fully rebuild repository caches; returns the captured text output."""
        repo = self._factory.repo(wire)
        baseui = self._factory._create_config(wire['config'])
        # capture ui output into a buffer instead of stdout
        baseui, output = patch_ui_message_output(baseui)

        repo.ui = baseui
        # updatecaches requires both working-dir and store locks
        with repo.wlock(), repo.lock():
            repo.updatecaches(full=True)

        return output.getvalue()
|
|
|
|
|
|
    @reraise_safe_exceptions
    def hg_rebuild_fn_cache(self, wire,):
        """Rebuild the fncache file; returns the captured text output."""
        repo = self._factory.repo(wire)
        baseui = self._factory._create_config(wire['config'])
        # capture ui output into a buffer instead of stdout
        baseui, output = patch_ui_message_output(baseui)

        repo.ui = baseui

        repair.rebuildfncache(baseui, repo)

        return output.getvalue()
|
|
|
|
|
|
    @reraise_safe_exceptions
    def tags(self, wire):
        """Return ``{tag_name: hex_sha}`` for all repository tags (cached)."""
        cache_on, context_uid, repo_id = self._cache_on(wire)
        region = self._region(wire)

        @region.conditional_cache_on_arguments(condition=cache_on)
        def _tags(_context_uid, _repo_id):
            repo = self._factory.repo(wire)
            return {safe_str(name): ascii_str(hex(sha)) for name, sha in repo.tags().items()}

        return _tags(context_uid, repo_id)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def update(self, wire, node='', clean=False):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'])
|
|
|
node = safe_bytes(node)
|
|
|
|
|
|
commands.update(baseui, repo, node=node, clean=clean)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def identify(self, wire):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'])
|
|
|
output = io.BytesIO()
|
|
|
baseui.write = output.write
|
|
|
# This is required to get a full node id
|
|
|
baseui.debugflag = True
|
|
|
commands.identify(baseui, repo, id=True)
|
|
|
|
|
|
return output.getvalue()
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def heads(self, wire, branch=None):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'])
|
|
|
output = io.BytesIO()
|
|
|
|
|
|
def write(data, **unused_kwargs):
|
|
|
output.write(data)
|
|
|
|
|
|
baseui.write = write
|
|
|
if branch:
|
|
|
args = [safe_bytes(branch)]
|
|
|
else:
|
|
|
args = []
|
|
|
commands.heads(baseui, repo, template=b'{node} ', *args)
|
|
|
|
|
|
return output.getvalue()
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def ancestor(self, wire, revision1, revision2):
|
|
|
repo = self._factory.repo(wire)
|
|
|
changelog = repo.changelog
|
|
|
lookup = repo.lookup
|
|
|
a = changelog.ancestor(lookup(safe_bytes(revision1)), lookup(safe_bytes(revision2)))
|
|
|
return hex(a)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def clone(self, wire, source, dest, update_after_clone=False, hooks=True):
|
|
|
baseui = self._factory._create_config(wire["config"], hooks=hooks)
|
|
|
clone(baseui, safe_bytes(source), safe_bytes(dest), noupdate=not update_after_clone)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def commitctx(self, wire, message, parents, commit_time, commit_timezone, user, files, extra, removed, updated):
        """
        Create an in-memory commit (no working directory involved) and return
        its new hex commit id.

        :param message: commit message
        :param parents: parent node(s) for the new commit (passed to memctx)
        :param commit_time: commit timestamp
        :param commit_timezone: timezone offset used together with commit_time
        :param user: author/committer identity
        :param files: all paths touched by the commit
        :param extra: mapping of extra commit metadata (keys/values coerced to bytes)
        :param removed: paths to delete in this commit
        :param updated: list of node dicts (``path``, ``content``, ``mode``) to
            add or modify — assumed schema; confirm against callers
        """
        repo = self._factory.repo(wire)
        baseui = self._factory._create_config(wire['config'])
        # phases.publish decides whether the new commit is public or draft
        publishing = baseui.configbool(b'phases', b'publish')

        def _filectxfn(_repo, ctx, path: bytes):
            """
            Marks given path as added/changed/removed in a given _repo. This is
            for internal mercurial commit function.
            """

            # check if this path is removed
            if safe_str(path) in removed:
                # returning None is a way to mark node for removal
                return None

            # check if this path is added
            for node in updated:
                if safe_bytes(node['path']) == path:
                    return memfilectx(
                        _repo,
                        changectx=ctx,
                        path=safe_bytes(node['path']),
                        data=safe_bytes(node['content']),
                        islink=False,
                        # executable bit derived from the node's POSIX mode
                        isexec=bool(node['mode'] & stat.S_IXUSR),
                        copysource=False)
            # Path was neither removed nor present in `updated`: abort the commit.
            # NOTE(review): exceptions.AbortException() appears to return an
            # exception *class* that is then instantiated with the message —
            # matches the usage in tag() below; confirm against vcsserver.exceptions.
            abort_exc = exceptions.AbortException()
            raise abort_exc(f"Given path haven't been marked as added, changed or removed ({path})")

        if publishing:
            new_commit_phase = b'public'
        else:
            new_commit_phase = b'draft'
        # Temporarily force the phase of the new commit for the duration of
        # this commit operation only.
        with repo.ui.configoverride({(b'phases', b'new-commit'): new_commit_phase}):
            kwargs = {safe_bytes(k): safe_bytes(v) for k, v in extra.items()}
            commit_ctx = memctx(
                repo=repo,
                parents=parents,
                text=safe_bytes(message),
                files=[safe_bytes(x) for x in files],
                filectxfn=_filectxfn,
                user=safe_bytes(user),
                date=(commit_time, commit_timezone),
                extra=kwargs)

            n = repo.commitctx(commit_ctx)
            new_id = hex(n)

            return new_id
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def pull(self, wire, url, commit_ids=None):
|
|
|
repo = self._factory.repo(wire)
|
|
|
# Disable any prompts for this repo
|
|
|
repo.ui.setconfig(b'ui', b'interactive', b'off', b'-y')
|
|
|
|
|
|
remote = peer(repo, {}, safe_bytes(url))
|
|
|
# Disable any prompts for this remote
|
|
|
remote.ui.setconfig(b'ui', b'interactive', b'off', b'-y')
|
|
|
|
|
|
if commit_ids:
|
|
|
commit_ids = [bin(commit_id) for commit_id in commit_ids]
|
|
|
|
|
|
return exchange.pull(
|
|
|
repo, remote, heads=commit_ids, force=None).cgresult
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def pull_cmd(self, wire, source, bookmark='', branch='', revision='', hooks=True):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'], hooks=hooks)
|
|
|
|
|
|
source = safe_bytes(source)
|
|
|
|
|
|
# Mercurial internally has a lot of logic that checks ONLY if
|
|
|
# option is defined, we just pass those if they are defined then
|
|
|
opts = {}
|
|
|
|
|
|
if bookmark:
|
|
|
opts['bookmark'] = [safe_bytes(x) for x in bookmark] \
|
|
|
if isinstance(bookmark, list) else safe_bytes(bookmark)
|
|
|
|
|
|
if branch:
|
|
|
opts['branch'] = [safe_bytes(x) for x in branch] \
|
|
|
if isinstance(branch, list) else safe_bytes(branch)
|
|
|
|
|
|
if revision:
|
|
|
opts['rev'] = [safe_bytes(x) for x in revision] \
|
|
|
if isinstance(revision, list) else safe_bytes(revision)
|
|
|
|
|
|
commands.pull(baseui, repo, source, **opts)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def push(self, wire, revisions, dest_path, hooks: bool = True, push_branches: bool = False):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'], hooks=hooks)
|
|
|
|
|
|
revisions = [safe_bytes(x) for x in revisions] \
|
|
|
if isinstance(revisions, list) else safe_bytes(revisions)
|
|
|
|
|
|
commands.push(baseui, repo, safe_bytes(dest_path),
|
|
|
rev=revisions,
|
|
|
new_branch=push_branches)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def strip(self, wire, revision, update, backup):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
hgext_strip.strip(
|
|
|
repo.baseui, repo, ctx.node(), update=update, backup=backup)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def get_unresolved_files(self, wire):
|
|
|
repo = self._factory.repo(wire)
|
|
|
|
|
|
log.debug('Calculating unresolved files for repo: %s', repo)
|
|
|
output = io.BytesIO()
|
|
|
|
|
|
def write(data, **unused_kwargs):
|
|
|
output.write(data)
|
|
|
|
|
|
baseui = self._factory._create_config(wire['config'])
|
|
|
baseui.write = write
|
|
|
|
|
|
commands.resolve(baseui, repo, list=True)
|
|
|
unresolved = output.getvalue().splitlines(0)
|
|
|
return unresolved
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def merge(self, wire, revision):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'])
|
|
|
repo.ui.setconfig(b'ui', b'merge', b'internal:dump')
|
|
|
|
|
|
# In case of sub repositories are used mercurial prompts the user in
|
|
|
# case of merge conflicts or different sub repository sources. By
|
|
|
# setting the interactive flag to `False` mercurial doesn't prompt the
|
|
|
# used but instead uses a default value.
|
|
|
repo.ui.setconfig(b'ui', b'interactive', False)
|
|
|
commands.merge(baseui, repo, rev=safe_bytes(revision))
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def merge_state(self, wire):
|
|
|
repo = self._factory.repo(wire)
|
|
|
repo.ui.setconfig(b'ui', b'merge', b'internal:dump')
|
|
|
|
|
|
# In case of sub repositories are used mercurial prompts the user in
|
|
|
# case of merge conflicts or different sub repository sources. By
|
|
|
# setting the interactive flag to `False` mercurial doesn't prompt the
|
|
|
# used but instead uses a default value.
|
|
|
repo.ui.setconfig(b'ui', b'interactive', False)
|
|
|
ms = hg_merge.mergestate(repo)
|
|
|
return [x for x in ms.unresolved()]
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def commit(self, wire, message, username, close_branch=False):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'])
|
|
|
repo.ui.setconfig(b'ui', b'username', safe_bytes(username))
|
|
|
commands.commit(baseui, repo, message=safe_bytes(message), close_branch=close_branch)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def rebase(self, wire, source='', dest='', abort=False):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'])
|
|
|
repo.ui.setconfig(b'ui', b'merge', b'internal:dump')
|
|
|
# In case of sub repositories are used mercurial prompts the user in
|
|
|
# case of merge conflicts or different sub repository sources. By
|
|
|
# setting the interactive flag to `False` mercurial doesn't prompt the
|
|
|
# used but instead uses a default value.
|
|
|
repo.ui.setconfig(b'ui', b'interactive', False)
|
|
|
|
|
|
rebase.rebase(baseui, repo, base=safe_bytes(source or ''), dest=safe_bytes(dest or ''),
|
|
|
abort=abort, keep=not abort)
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def tag(self, wire, name, revision, message, local, user, tag_time, tag_timezone):
|
|
|
repo = self._factory.repo(wire)
|
|
|
ctx = self._get_ctx(repo, revision)
|
|
|
node = ctx.node()
|
|
|
|
|
|
date = (tag_time, tag_timezone)
|
|
|
try:
|
|
|
hg_tag.tag(repo, safe_bytes(name), node, safe_bytes(message), local, safe_bytes(user), date)
|
|
|
except Abort as e:
|
|
|
log.exception("Tag operation aborted")
|
|
|
# Exception can contain unicode which we convert
|
|
|
raise exceptions.AbortException(e)(repr(e))
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def bookmark(self, wire, bookmark, revision=''):
|
|
|
repo = self._factory.repo(wire)
|
|
|
baseui = self._factory._create_config(wire['config'])
|
|
|
revision = revision or ''
|
|
|
commands.bookmark(baseui, repo, safe_bytes(bookmark), rev=safe_bytes(revision), force=True)
|
|
|
|
|
|
    @reraise_safe_exceptions
    def install_hooks(self, wire, force=False):
        """No-op: nothing is installed for Mercurial repositories."""
        # we don't need any special hooks for Mercurial
        pass
|
|
|
|
|
|
@reraise_safe_exceptions
|
|
|
def get_hooks_info(self, wire):
|
|
|
return {
|
|
|
'pre_version': vcsserver.__version__,
|
|
|
'post_version': vcsserver.__version__,
|
|
|
}
|
|
|
|
|
|
    @reraise_safe_exceptions
    def set_head_ref(self, wire, head_name):
        """No-op for Mercurial repositories; ``head_name`` is ignored."""
        pass
|
|
|
|
|
|
    @reraise_safe_exceptions
    def archive_repo(self, wire, archive_name_key, kind, mtime, archive_at_path,
                     archive_dir_name, commit_id, cache_config):
        """
        Build (and cache) an archive of the repository content at ``commit_id``.

        Delegates the actual packaging and caching to
        :func:`store_archive_in_cache`, providing it a lazy file walker.

        :param archive_name_key: cache key identifying this archive
        :param kind: archive format passed through to the cache helper
        :param mtime: timestamp applied to archived entries
        :param archive_at_path: subtree to archive ('' or '/' means whole repo)
        :param archive_dir_name: top-level directory name inside the archive
        :param commit_id: commit whose tree is archived
        :param cache_config: cache backend configuration
        """

        def file_walker(_commit_id, path):
            # Yields one ArchiveNode per file under `path` at `_commit_id`.
            repo = self._factory.repo(wire)
            ctx = repo[_commit_id]
            is_root = path in ['', '/']
            if is_root:
                # whole tree: match every file in the manifest
                matcher = alwaysmatcher(badfn=None)
            else:
                # subtree only: glob-match everything below the given path
                matcher = patternmatcher('', [(b'glob', safe_bytes(path)+b'/**', b'')], badfn=None)
            file_iter = ctx.manifest().walk(matcher)

            for fn in file_iter:
                file_path = fn
                flags = ctx.flags(fn)
                # 'x' flag -> executable; otherwise a regular file mode
                mode = b'x' in flags and 0o755 or 0o644
                is_link = b'l' in flags

                # ctx[fn].data is passed as a callable; content is read lazily
                yield ArchiveNode(file_path, mode, is_link, ctx[fn].data)

        return store_archive_in_cache(
            file_walker, archive_name_key, kind, mtime, archive_at_path, archive_dir_name, commit_id, cache_config=cache_config)
|
|
|
|
|
|
|