fileindexapi.py
# Infinite push
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""
[infinitepush]
# Server-side option. Used only if indextype=disk.
# Filesystem path to the index store
indexpath = PATH
"""
from __future__ import absolute_import

import os

from mercurial import util
from mercurial.utils import stringutil

from . import indexapi

class fileindexapi(indexapi.indexapi):
    def __init__(self, repo):
        super(fileindexapi, self).__init__()
        self._repo = repo
        root = repo.ui.config('infinitepush', 'indexpath')
        if not root:
            root = os.path.join('scratchbranches', 'index')

        self._nodemap = os.path.join(root, 'nodemap')
        self._bookmarkmap = os.path.join(root, 'bookmarkmap')
        self._metadatamap = os.path.join(root, 'nodemetadatamap')
        self._lock = None
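
    # The index is used as a context manager: entering takes the repo's
    # working-directory lock (wlock) so concurrent writers do not race on the
    # map files, and exiting releases it.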
    def __enter__(self):
        self._lock = self._repo.wlock()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._lock:
            self._lock.__exit__(exc_type, exc_val, exc_tb)
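
    # Write side: each pushed node maps to the id of the bundle that contains
    # it, and each scratch bookmark maps to a node hash.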
    def addbundle(self, bundleid, nodesctx):
        for node in nodesctx:
            nodepath = os.path.join(self._nodemap, node.hex())
            self._write(nodepath, bundleid)

    def addbookmark(self, bookmark, node):
        bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
        self._write(bookmarkpath, node)

    def addmanybookmarks(self, bookmarks):
        for bookmark, node in bookmarks.items():
            self.addbookmark(bookmark, node)

    def deletebookmarks(self, patterns):
        for pattern in patterns:
            for bookmark, _ in self._listbookmarks(pattern):
                bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
                self._delete(bookmarkpath)
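
    # Read side: lookups go through _read(), so a missing entry yields None
    # rather than raising.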
    def getbundle(self, node):
        nodepath = os.path.join(self._nodemap, node)
        return self._read(nodepath)

    def getnode(self, bookmark):
        bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
        return self._read(bookmarkpath)

    def getbookmarks(self, query):
        return dict(self._listbookmarks(query))

    def saveoptionaljsonmetadata(self, node, jsonmetadata):
        vfs = self._repo.vfs
        vfs.write(os.path.join(self._metadatamap, node), jsonmetadata)
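
    # A trailing '*' in a bookmark pattern is rewritten into an anchored
    # regular expression before being handed to stringutil.stringmatcher(),
    # which returns a (kind, pattern, matcher) tuple.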
    def _listbookmarks(self, pattern):
        if pattern.endswith('*'):
            pattern = 're:^' + pattern[:-1] + '.*'
        kind, pat, matcher = stringutil.stringmatcher(pattern)
        prefixlen = len(self._bookmarkmap) + 1
        for dirpath, _, books in self._repo.vfs.walk(self._bookmarkmap):
            for book in books:
                bookmark = os.path.join(dirpath, book)[prefixlen:]
                bookmark = util.pconvert(bookmark)
                if not matcher(bookmark):
                    continue
                yield bookmark, self._read(os.path.join(dirpath, book))
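
    # Low-level storage helpers built on the repository vfs (rooted at the
    # repo's .hg directory); parent directories are created on demand.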
    def _write(self, path, value):
        vfs = self._repo.vfs
        dirname = vfs.dirname(path)
        if not vfs.exists(dirname):
            vfs.makedirs(dirname)
        vfs.write(path, value)

    def _read(self, path):
        vfs = self._repo.vfs
        if not vfs.exists(path):
            return None
        return vfs.read(path)

    def _delete(self, path):
        vfs = self._repo.vfs
        if not vfs.exists(path):
            return
        return vfs.unlink(path)
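
# Usage sketch (illustrative only; the bookmark name and node variable below
# are examples, not values defined by this module):
#
#     index = fileindexapi(repo)
#     with index:
#         index.addbookmark('scratch/feature', newnodehex)
#         bundleless_node = index.getnode('scratch/feature')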