narrow: move .hg/narrowspec to .hg/store/narrowspec (BC)...
Martin von Zweigbergk
r39145:576eef1a default
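
The narrowspec file records which paths a narrow clone tracks. Moving it from .hg/narrowspec into .hg/store/narrowspec puts the spec next to the history data it describes, so repositories created with 'hg share' (which point their store at the source repository) pick it up automatically, and the share-specific copying removed below becomes unnecessary. As a minimal sketch of what the new location implies for code reading the spec (the helper name is hypothetical; repo.svfs is the store vfs shown in the localrepo.py context below):

    # Hypothetical helper, assuming the post-move layout: the spec is
    # reached through the store vfs (.hg/store/narrowspec), not the
    # plain .hg vfs that the old .hg/narrowspec location required.
    def readnarrowspec(repo):
        try:
            return repo.svfs.read('narrowspec')
        except IOError:
            return b''  # no spec recorded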
@@ -1,93 +1,90 @@
 # __init__.py - narrowhg extension
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 '''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
 
 from __future__ import absolute_import
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
 from mercurial import (
     extensions,
-    hg,
     localrepo,
     registrar,
     repository,
     verify as verifymod,
 )
 
 from . import (
     narrowbundle2,
     narrowchangegroup,
     narrowcommands,
     narrowcopies,
     narrowpatch,
     narrowrepo,
     narrowrevlog,
     narrowtemplates,
     narrowwirepeer,
 )
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 # Narrowhg *has* support for serving ellipsis nodes (which are used at
 # least by Google's internal server), but that support is pretty
 # fragile and has a lot of problems on real-world repositories that
 # have complex graph topologies. This could probably be corrected, but
 # absent someone needing the full support for ellipsis nodes in
 # repositories with merges, it's unlikely this work will get done. As
 # of this writing in late 2017, all repositories large enough for
 # ellipsis nodes to be a hard requirement also enforce strictly linear
 # history for other scaling reasons.
 configitem('experimental', 'narrowservebrokenellipses',
            default=False,
            alias=[('narrow', 'serveellipses')],
 )
 
 # Export the commands table for Mercurial to see.
 cmdtable = narrowcommands.table
 
 def featuresetup(ui, features):
     features.add(repository.NARROW_REQUIREMENT)
 
 def uisetup(ui):
     """Wraps user-facing mercurial commands with narrow-aware versions."""
     localrepo.featuresetupfuncs.add(featuresetup)
     narrowrevlog.setup()
     narrowbundle2.setup()
     narrowcommands.setup()
     narrowchangegroup.setup()
     narrowwirepeer.uisetup()
 
 def reposetup(ui, repo):
     """Wraps local repositories with narrow repo support."""
     if not repo.local():
         return
 
     if repository.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
         narrowcopies.setup(repo)
         narrowpatch.setup(repo)
         narrowwirepeer.reposetup(repo)
 
 def _verifierinit(orig, self, repo, matcher=None):
     # The verifier's matcher argument was designed for narrowhg, so it should
     # be None from core. If another extension passes a matcher (unlikely),
     # we'll have to fail until matchers can be composed more easily.
     assert matcher is None
     orig(self, repo, repo.narrowmatch())
 
 def extsetup(ui):
     extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
-    extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
-    extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
 
 templatekeyword = narrowtemplates.templatekeyword
 revsetpredicate = narrowtemplates.revsetpredicate
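
With the spec in the store, the 'hg share' and 'hg unshare' hooks removed above are no longer needed: a share reads the same .hg/store as its source repository, so both see one narrowspec. A rough illustration of the path difference (paths hypothetical):

    # Before: the spec lived beside .hg/requires, which a share does
    # not inherit, so it had to be copied around explicitly.
    old = repo.vfs.join('narrowspec')   # <share>/.hg/narrowspec
    # After: it lives in the store, which a share does inherit.
    new = repo.svfs.join('narrowspec')  # <source>/.hg/store/narrowspec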
@@ -1,52 +1,29 @@
 # narrowrepo.py - repository which supports narrow revlogs, lazy loading
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
-from mercurial import (
-    hg,
-    narrowspec,
-    repository,
-)
-
 from . import (
     narrowdirstate,
     narrowrevlog,
 )
 
-def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
-    orig(sourcerepo, destrepo, **kwargs)
-    if repository.NARROW_REQUIREMENT in sourcerepo.requirements:
-        with destrepo.wlock():
-            with destrepo.vfs('shared', 'a') as fp:
-                fp.write(narrowspec.FILENAME + '\n')
-
-def unsharenarrowspec(orig, ui, repo, repopath):
-    if (repository.NARROW_REQUIREMENT in repo.requirements
-        and repo.path == repopath and repo.shared()):
-        srcrepo = hg.sharedreposource(repo)
-        with srcrepo.vfs(narrowspec.FILENAME) as f:
-            spec = f.read()
-        with repo.vfs(narrowspec.FILENAME, 'w') as f:
-            f.write(spec)
-    return orig(ui, repo, repopath)
-
 def wraprepo(repo):
     """Enables narrow clone functionality on a single local repository."""
 
     class narrowrepository(repo.__class__):
 
         def file(self, f):
             fl = super(narrowrepository, self).file(f)
             narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
             return fl
 
         def _makedirstate(self):
             dirstate = super(narrowrepository, self)._makedirstate()
             return narrowdirstate.wrapdirstate(self, dirstate)
 
     repo.__class__ = narrowrepository
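
wraprepo() above uses Mercurial's usual dynamic-subclassing trick: derive a new class from the object's *current* class and swap it in, so wrappers installed by several extensions stack cleanly. A self-contained sketch of the same pattern (names hypothetical):

    class base(object):
        def describe(self):
            return 'base repo'

    def wrapobject(obj):
        # Derive from obj's current class, not a fixed base, so that
        # any earlier wrappers stay in the method resolution order.
        class wrapped(obj.__class__):
            def describe(self):
                return 'narrow ' + super(wrapped, self).describe()
        obj.__class__ = wrapped

    r = base()
    wrapobject(r)
    print(r.describe())  # prints: narrow base repo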
@@ -1,2405 +1,2405 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 discovery,
34 discovery,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 narrowspec,
47 narrowspec,
48 obsolete,
48 obsolete,
49 pathutil,
49 pathutil,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repository,
53 repository,
54 repoview,
54 repoview,
55 revset,
55 revset,
56 revsetlang,
56 revsetlang,
57 scmutil,
57 scmutil,
58 sparse,
58 sparse,
59 store,
59 store,
60 subrepoutil,
60 subrepoutil,
61 tags as tagsmod,
61 tags as tagsmod,
62 transaction,
62 transaction,
63 txnutil,
63 txnutil,
64 util,
64 util,
65 vfs as vfsmod,
65 vfs as vfsmod,
66 )
66 )
67 from .utils import (
67 from .utils import (
68 interfaceutil,
68 interfaceutil,
69 procutil,
69 procutil,
70 stringutil,
70 stringutil,
71 )
71 )
72
72
73 release = lockmod.release
73 release = lockmod.release
74 urlerr = util.urlerr
74 urlerr = util.urlerr
75 urlreq = util.urlreq
75 urlreq = util.urlreq
76
76
77 # set of (path, vfs-location) tuples. vfs-location is:
77 # set of (path, vfs-location) tuples. vfs-location is:
78 # - 'plain for vfs relative paths
78 # - 'plain for vfs relative paths
79 # - '' for svfs relative paths
79 # - '' for svfs relative paths
80 _cachedfiles = set()
80 _cachedfiles = set()
81
81
82 class _basefilecache(scmutil.filecache):
82 class _basefilecache(scmutil.filecache):
83 """All filecache usage on repo are done for logic that should be unfiltered
83 """All filecache usage on repo are done for logic that should be unfiltered
84 """
84 """
85 def __get__(self, repo, type=None):
85 def __get__(self, repo, type=None):
86 if repo is None:
86 if repo is None:
87 return self
87 return self
88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
89 def __set__(self, repo, value):
89 def __set__(self, repo, value):
90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
91 def __delete__(self, repo):
91 def __delete__(self, repo):
92 return super(_basefilecache, self).__delete__(repo.unfiltered())
92 return super(_basefilecache, self).__delete__(repo.unfiltered())
93
93
94 class repofilecache(_basefilecache):
94 class repofilecache(_basefilecache):
95 """filecache for files in .hg but outside of .hg/store"""
95 """filecache for files in .hg but outside of .hg/store"""
96 def __init__(self, *paths):
96 def __init__(self, *paths):
97 super(repofilecache, self).__init__(*paths)
97 super(repofilecache, self).__init__(*paths)
98 for path in paths:
98 for path in paths:
99 _cachedfiles.add((path, 'plain'))
99 _cachedfiles.add((path, 'plain'))
100
100
101 def join(self, obj, fname):
101 def join(self, obj, fname):
102 return obj.vfs.join(fname)
102 return obj.vfs.join(fname)
103
103
104 class storecache(_basefilecache):
104 class storecache(_basefilecache):
105 """filecache for files in the store"""
105 """filecache for files in the store"""
106 def __init__(self, *paths):
106 def __init__(self, *paths):
107 super(storecache, self).__init__(*paths)
107 super(storecache, self).__init__(*paths)
108 for path in paths:
108 for path in paths:
109 _cachedfiles.add((path, ''))
109 _cachedfiles.add((path, ''))
110
110
111 def join(self, obj, fname):
111 def join(self, obj, fname):
112 return obj.sjoin(fname)
112 return obj.sjoin(fname)
113
113
114 def isfilecached(repo, name):
114 def isfilecached(repo, name):
115 """check if a repo has already cached "name" filecache-ed property
115 """check if a repo has already cached "name" filecache-ed property
116
116
117 This returns (cachedobj-or-None, iscached) tuple.
117 This returns (cachedobj-or-None, iscached) tuple.
118 """
118 """
119 cacheentry = repo.unfiltered()._filecache.get(name, None)
119 cacheentry = repo.unfiltered()._filecache.get(name, None)
120 if not cacheentry:
120 if not cacheentry:
121 return None, False
121 return None, False
122 return cacheentry.obj, True
122 return cacheentry.obj, True
123
123
124 class unfilteredpropertycache(util.propertycache):
124 class unfilteredpropertycache(util.propertycache):
125 """propertycache that apply to unfiltered repo only"""
125 """propertycache that apply to unfiltered repo only"""
126
126
127 def __get__(self, repo, type=None):
127 def __get__(self, repo, type=None):
128 unfi = repo.unfiltered()
128 unfi = repo.unfiltered()
129 if unfi is repo:
129 if unfi is repo:
130 return super(unfilteredpropertycache, self).__get__(unfi)
130 return super(unfilteredpropertycache, self).__get__(unfi)
131 return getattr(unfi, self.name)
131 return getattr(unfi, self.name)
132
132
133 class filteredpropertycache(util.propertycache):
133 class filteredpropertycache(util.propertycache):
134 """propertycache that must take filtering in account"""
134 """propertycache that must take filtering in account"""
135
135
136 def cachevalue(self, obj, value):
136 def cachevalue(self, obj, value):
137 object.__setattr__(obj, self.name, value)
137 object.__setattr__(obj, self.name, value)
138
138
139
139
140 def hasunfilteredcache(repo, name):
140 def hasunfilteredcache(repo, name):
141 """check if a repo has an unfilteredpropertycache value for <name>"""
141 """check if a repo has an unfilteredpropertycache value for <name>"""
142 return name in vars(repo.unfiltered())
142 return name in vars(repo.unfiltered())
143
143
144 def unfilteredmethod(orig):
144 def unfilteredmethod(orig):
145 """decorate method that always need to be run on unfiltered version"""
145 """decorate method that always need to be run on unfiltered version"""
146 def wrapper(repo, *args, **kwargs):
146 def wrapper(repo, *args, **kwargs):
147 return orig(repo.unfiltered(), *args, **kwargs)
147 return orig(repo.unfiltered(), *args, **kwargs)
148 return wrapper
148 return wrapper
149
149
150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
151 'unbundle'}
151 'unbundle'}
152 legacycaps = moderncaps.union({'changegroupsubset'})
152 legacycaps = moderncaps.union({'changegroupsubset'})
153
153
154 @interfaceutil.implementer(repository.ipeercommandexecutor)
154 @interfaceutil.implementer(repository.ipeercommandexecutor)
155 class localcommandexecutor(object):
155 class localcommandexecutor(object):
156 def __init__(self, peer):
156 def __init__(self, peer):
157 self._peer = peer
157 self._peer = peer
158 self._sent = False
158 self._sent = False
159 self._closed = False
159 self._closed = False
160
160
161 def __enter__(self):
161 def __enter__(self):
162 return self
162 return self
163
163
164 def __exit__(self, exctype, excvalue, exctb):
164 def __exit__(self, exctype, excvalue, exctb):
165 self.close()
165 self.close()
166
166
167 def callcommand(self, command, args):
167 def callcommand(self, command, args):
168 if self._sent:
168 if self._sent:
169 raise error.ProgrammingError('callcommand() cannot be used after '
169 raise error.ProgrammingError('callcommand() cannot be used after '
170 'sendcommands()')
170 'sendcommands()')
171
171
172 if self._closed:
172 if self._closed:
173 raise error.ProgrammingError('callcommand() cannot be used after '
173 raise error.ProgrammingError('callcommand() cannot be used after '
174 'close()')
174 'close()')
175
175
176 # We don't need to support anything fancy. Just call the named
176 # We don't need to support anything fancy. Just call the named
177 # method on the peer and return a resolved future.
177 # method on the peer and return a resolved future.
178 fn = getattr(self._peer, pycompat.sysstr(command))
178 fn = getattr(self._peer, pycompat.sysstr(command))
179
179
180 f = pycompat.futures.Future()
180 f = pycompat.futures.Future()
181
181
182 try:
182 try:
183 result = fn(**pycompat.strkwargs(args))
183 result = fn(**pycompat.strkwargs(args))
184 except Exception:
184 except Exception:
185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
186 else:
186 else:
187 f.set_result(result)
187 f.set_result(result)
188
188
189 return f
189 return f
190
190
191 def sendcommands(self):
191 def sendcommands(self):
192 self._sent = True
192 self._sent = True
193
193
194 def close(self):
194 def close(self):
195 self._closed = True
195 self._closed = True
196
196
197 @interfaceutil.implementer(repository.ipeercommands)
197 @interfaceutil.implementer(repository.ipeercommands)
198 class localpeer(repository.peer):
198 class localpeer(repository.peer):
199 '''peer for a local repo; reflects only the most recent API'''
199 '''peer for a local repo; reflects only the most recent API'''
200
200
201 def __init__(self, repo, caps=None):
201 def __init__(self, repo, caps=None):
202 super(localpeer, self).__init__()
202 super(localpeer, self).__init__()
203
203
204 if caps is None:
204 if caps is None:
205 caps = moderncaps.copy()
205 caps = moderncaps.copy()
206 self._repo = repo.filtered('served')
206 self._repo = repo.filtered('served')
207 self.ui = repo.ui
207 self.ui = repo.ui
208 self._caps = repo._restrictcapabilities(caps)
208 self._caps = repo._restrictcapabilities(caps)
209
209
210 # Begin of _basepeer interface.
210 # Begin of _basepeer interface.
211
211
212 def url(self):
212 def url(self):
213 return self._repo.url()
213 return self._repo.url()
214
214
215 def local(self):
215 def local(self):
216 return self._repo
216 return self._repo
217
217
218 def peer(self):
218 def peer(self):
219 return self
219 return self
220
220
221 def canpush(self):
221 def canpush(self):
222 return True
222 return True
223
223
224 def close(self):
224 def close(self):
225 self._repo.close()
225 self._repo.close()
226
226
227 # End of _basepeer interface.
227 # End of _basepeer interface.
228
228
229 # Begin of _basewirecommands interface.
229 # Begin of _basewirecommands interface.
230
230
231 def branchmap(self):
231 def branchmap(self):
232 return self._repo.branchmap()
232 return self._repo.branchmap()
233
233
234 def capabilities(self):
234 def capabilities(self):
235 return self._caps
235 return self._caps
236
236
237 def clonebundles(self):
237 def clonebundles(self):
238 return self._repo.tryread('clonebundles.manifest')
238 return self._repo.tryread('clonebundles.manifest')
239
239
240 def debugwireargs(self, one, two, three=None, four=None, five=None):
240 def debugwireargs(self, one, two, three=None, four=None, five=None):
241 """Used to test argument passing over the wire"""
241 """Used to test argument passing over the wire"""
242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
243 pycompat.bytestr(four),
243 pycompat.bytestr(four),
244 pycompat.bytestr(five))
244 pycompat.bytestr(five))
245
245
246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
247 **kwargs):
247 **kwargs):
248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
249 common=common, bundlecaps=bundlecaps,
249 common=common, bundlecaps=bundlecaps,
250 **kwargs)[1]
250 **kwargs)[1]
251 cb = util.chunkbuffer(chunks)
251 cb = util.chunkbuffer(chunks)
252
252
253 if exchange.bundle2requested(bundlecaps):
253 if exchange.bundle2requested(bundlecaps):
254 # When requesting a bundle2, getbundle returns a stream to make the
254 # When requesting a bundle2, getbundle returns a stream to make the
255 # wire level function happier. We need to build a proper object
255 # wire level function happier. We need to build a proper object
256 # from it in local peer.
256 # from it in local peer.
257 return bundle2.getunbundler(self.ui, cb)
257 return bundle2.getunbundler(self.ui, cb)
258 else:
258 else:
259 return changegroup.getunbundler('01', cb, None)
259 return changegroup.getunbundler('01', cb, None)
260
260
261 def heads(self):
261 def heads(self):
262 return self._repo.heads()
262 return self._repo.heads()
263
263
264 def known(self, nodes):
264 def known(self, nodes):
265 return self._repo.known(nodes)
265 return self._repo.known(nodes)
266
266
267 def listkeys(self, namespace):
267 def listkeys(self, namespace):
268 return self._repo.listkeys(namespace)
268 return self._repo.listkeys(namespace)
269
269
270 def lookup(self, key):
270 def lookup(self, key):
271 return self._repo.lookup(key)
271 return self._repo.lookup(key)
272
272
273 def pushkey(self, namespace, key, old, new):
273 def pushkey(self, namespace, key, old, new):
274 return self._repo.pushkey(namespace, key, old, new)
274 return self._repo.pushkey(namespace, key, old, new)
275
275
276 def stream_out(self):
276 def stream_out(self):
277 raise error.Abort(_('cannot perform stream clone against local '
277 raise error.Abort(_('cannot perform stream clone against local '
278 'peer'))
278 'peer'))
279
279
280 def unbundle(self, bundle, heads, url):
280 def unbundle(self, bundle, heads, url):
281 """apply a bundle on a repo
281 """apply a bundle on a repo
282
282
283 This function handles the repo locking itself."""
283 This function handles the repo locking itself."""
284 try:
284 try:
285 try:
285 try:
286 bundle = exchange.readbundle(self.ui, bundle, None)
286 bundle = exchange.readbundle(self.ui, bundle, None)
287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
288 if util.safehasattr(ret, 'getchunks'):
288 if util.safehasattr(ret, 'getchunks'):
289 # This is a bundle20 object, turn it into an unbundler.
289 # This is a bundle20 object, turn it into an unbundler.
290 # This little dance should be dropped eventually when the
290 # This little dance should be dropped eventually when the
291 # API is finally improved.
291 # API is finally improved.
292 stream = util.chunkbuffer(ret.getchunks())
292 stream = util.chunkbuffer(ret.getchunks())
293 ret = bundle2.getunbundler(self.ui, stream)
293 ret = bundle2.getunbundler(self.ui, stream)
294 return ret
294 return ret
295 except Exception as exc:
295 except Exception as exc:
296 # If the exception contains output salvaged from a bundle2
296 # If the exception contains output salvaged from a bundle2
297 # reply, we need to make sure it is printed before continuing
297 # reply, we need to make sure it is printed before continuing
298 # to fail. So we build a bundle2 with such output and consume
298 # to fail. So we build a bundle2 with such output and consume
299 # it directly.
299 # it directly.
300 #
300 #
301 # This is not very elegant but allows a "simple" solution for
301 # This is not very elegant but allows a "simple" solution for
302 # issue4594
302 # issue4594
303 output = getattr(exc, '_bundle2salvagedoutput', ())
303 output = getattr(exc, '_bundle2salvagedoutput', ())
304 if output:
304 if output:
305 bundler = bundle2.bundle20(self._repo.ui)
305 bundler = bundle2.bundle20(self._repo.ui)
306 for out in output:
306 for out in output:
307 bundler.addpart(out)
307 bundler.addpart(out)
308 stream = util.chunkbuffer(bundler.getchunks())
308 stream = util.chunkbuffer(bundler.getchunks())
309 b = bundle2.getunbundler(self.ui, stream)
309 b = bundle2.getunbundler(self.ui, stream)
310 bundle2.processbundle(self._repo, b)
310 bundle2.processbundle(self._repo, b)
311 raise
311 raise
312 except error.PushRaced as exc:
312 except error.PushRaced as exc:
313 raise error.ResponseError(_('push failed:'),
313 raise error.ResponseError(_('push failed:'),
314 stringutil.forcebytestr(exc))
314 stringutil.forcebytestr(exc))
315
315
316 # End of _basewirecommands interface.
316 # End of _basewirecommands interface.
317
317
318 # Begin of peer interface.
318 # Begin of peer interface.
319
319
320 def commandexecutor(self):
320 def commandexecutor(self):
321 return localcommandexecutor(self)
321 return localcommandexecutor(self)
322
322
323 # End of peer interface.
323 # End of peer interface.
324
324
325 @interfaceutil.implementer(repository.ipeerlegacycommands)
325 @interfaceutil.implementer(repository.ipeerlegacycommands)
326 class locallegacypeer(localpeer):
326 class locallegacypeer(localpeer):
327 '''peer extension which implements legacy methods too; used for tests with
327 '''peer extension which implements legacy methods too; used for tests with
328 restricted capabilities'''
328 restricted capabilities'''
329
329
330 def __init__(self, repo):
330 def __init__(self, repo):
331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
332
332
333 # Begin of baselegacywirecommands interface.
333 # Begin of baselegacywirecommands interface.
334
334
335 def between(self, pairs):
335 def between(self, pairs):
336 return self._repo.between(pairs)
336 return self._repo.between(pairs)
337
337
338 def branches(self, nodes):
338 def branches(self, nodes):
339 return self._repo.branches(nodes)
339 return self._repo.branches(nodes)
340
340
341 def changegroup(self, nodes, source):
341 def changegroup(self, nodes, source):
342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
343 missingheads=self._repo.heads())
343 missingheads=self._repo.heads())
344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
345
345
346 def changegroupsubset(self, bases, heads, source):
346 def changegroupsubset(self, bases, heads, source):
347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
348 missingheads=heads)
348 missingheads=heads)
349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
350
350
351 # End of baselegacywirecommands interface.
351 # End of baselegacywirecommands interface.
352
352
353 # Increment the sub-version when the revlog v2 format changes to lock out old
353 # Increment the sub-version when the revlog v2 format changes to lock out old
354 # clients.
354 # clients.
355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
356
356
357 # A repository with the sparserevlog feature will have delta chains that
357 # A repository with the sparserevlog feature will have delta chains that
358 # can spread over a larger span. Sparse reading cuts these large spans into
358 # can spread over a larger span. Sparse reading cuts these large spans into
359 # pieces, so that each piece isn't too big.
359 # pieces, so that each piece isn't too big.
360 # Without the sparserevlog capability, reading from the repository could use
360 # Without the sparserevlog capability, reading from the repository could use
361 # huge amounts of memory, because the whole span would be read at once,
361 # huge amounts of memory, because the whole span would be read at once,
362 # including all the intermediate revisions that aren't pertinent for the chain.
362 # including all the intermediate revisions that aren't pertinent for the chain.
363 # This is why once a repository has enabled sparse-read, it becomes required.
363 # This is why once a repository has enabled sparse-read, it becomes required.
364 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
364 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
365
365
366 # Functions receiving (ui, features) that extensions can register to impact
366 # Functions receiving (ui, features) that extensions can register to impact
367 # the ability to load repositories with custom requirements. Only
367 # the ability to load repositories with custom requirements. Only
368 # functions defined in loaded extensions are called.
368 # functions defined in loaded extensions are called.
369 #
369 #
370 # The function receives a set of requirement strings that the repository
370 # The function receives a set of requirement strings that the repository
371 # is capable of opening. Functions will typically add elements to the
371 # is capable of opening. Functions will typically add elements to the
372 # set to reflect that the extension knows how to handle that requirements.
372 # set to reflect that the extension knows how to handle that requirements.
373 featuresetupfuncs = set()
373 featuresetupfuncs = set()
374
374
375 @interfaceutil.implementer(repository.completelocalrepository)
375 @interfaceutil.implementer(repository.completelocalrepository)
376 class localrepository(object):
376 class localrepository(object):
377
377
378 # obsolete experimental requirements:
378 # obsolete experimental requirements:
379 # - manifestv2: An experimental new manifest format that allowed
379 # - manifestv2: An experimental new manifest format that allowed
380 # for stem compression of long paths. Experiment ended up not
380 # for stem compression of long paths. Experiment ended up not
381 # being successful (repository sizes went up due to worse delta
381 # being successful (repository sizes went up due to worse delta
382 # chains), and the code was deleted in 4.6.
382 # chains), and the code was deleted in 4.6.
383 supportedformats = {
383 supportedformats = {
384 'revlogv1',
384 'revlogv1',
385 'generaldelta',
385 'generaldelta',
386 'treemanifest',
386 'treemanifest',
387 REVLOGV2_REQUIREMENT,
387 REVLOGV2_REQUIREMENT,
388 SPARSEREVLOG_REQUIREMENT,
388 SPARSEREVLOG_REQUIREMENT,
389 }
389 }
390 _basesupported = supportedformats | {
390 _basesupported = supportedformats | {
391 'store',
391 'store',
392 'fncache',
392 'fncache',
393 'shared',
393 'shared',
394 'relshared',
394 'relshared',
395 'dotencode',
395 'dotencode',
396 'exp-sparse',
396 'exp-sparse',
397 }
397 }
398 openerreqs = {
398 openerreqs = {
399 'revlogv1',
399 'revlogv1',
400 'generaldelta',
400 'generaldelta',
401 'treemanifest',
401 'treemanifest',
402 }
402 }
403
403
404 # list of prefix for file which can be written without 'wlock'
404 # list of prefix for file which can be written without 'wlock'
405 # Extensions should extend this list when needed
405 # Extensions should extend this list when needed
406 _wlockfreeprefix = {
406 _wlockfreeprefix = {
407 # We migh consider requiring 'wlock' for the next
407 # We migh consider requiring 'wlock' for the next
408 # two, but pretty much all the existing code assume
408 # two, but pretty much all the existing code assume
409 # wlock is not needed so we keep them excluded for
409 # wlock is not needed so we keep them excluded for
410 # now.
410 # now.
411 'hgrc',
411 'hgrc',
412 'requires',
412 'requires',
413 # XXX cache is a complicatged business someone
413 # XXX cache is a complicatged business someone
414 # should investigate this in depth at some point
414 # should investigate this in depth at some point
415 'cache/',
415 'cache/',
416 # XXX shouldn't be dirstate covered by the wlock?
416 # XXX shouldn't be dirstate covered by the wlock?
417 'dirstate',
417 'dirstate',
418 # XXX bisect was still a bit too messy at the time
418 # XXX bisect was still a bit too messy at the time
419 # this changeset was introduced. Someone should fix
419 # this changeset was introduced. Someone should fix
420 # the remainig bit and drop this line
420 # the remainig bit and drop this line
421 'bisect.state',
421 'bisect.state',
422 }
422 }
423
423
424 def __init__(self, baseui, path, create=False, intents=None):
424 def __init__(self, baseui, path, create=False, intents=None):
425 self.requirements = set()
425 self.requirements = set()
426 self.filtername = None
426 self.filtername = None
427 # wvfs: rooted at the repository root, used to access the working copy
427 # wvfs: rooted at the repository root, used to access the working copy
428 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
428 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
429 # vfs: rooted at .hg, used to access repo files outside of .hg/store
429 # vfs: rooted at .hg, used to access repo files outside of .hg/store
430 self.vfs = None
430 self.vfs = None
431 # svfs: usually rooted at .hg/store, used to access repository history
431 # svfs: usually rooted at .hg/store, used to access repository history
432 # If this is a shared repository, this vfs may point to another
432 # If this is a shared repository, this vfs may point to another
433 # repository's .hg/store directory.
433 # repository's .hg/store directory.
434 self.svfs = None
434 self.svfs = None
435 self.root = self.wvfs.base
435 self.root = self.wvfs.base
436 self.path = self.wvfs.join(".hg")
436 self.path = self.wvfs.join(".hg")
437 self.origroot = path
437 self.origroot = path
438 # This is only used by context.workingctx.match in order to
438 # This is only used by context.workingctx.match in order to
439 # detect files in subrepos.
439 # detect files in subrepos.
440 self.auditor = pathutil.pathauditor(
440 self.auditor = pathutil.pathauditor(
441 self.root, callback=self._checknested)
441 self.root, callback=self._checknested)
442 # This is only used by context.basectx.match in order to detect
442 # This is only used by context.basectx.match in order to detect
443 # files in subrepos.
443 # files in subrepos.
444 self.nofsauditor = pathutil.pathauditor(
444 self.nofsauditor = pathutil.pathauditor(
445 self.root, callback=self._checknested, realfs=False, cached=True)
445 self.root, callback=self._checknested, realfs=False, cached=True)
446 self.baseui = baseui
446 self.baseui = baseui
447 self.ui = baseui.copy()
447 self.ui = baseui.copy()
448 self.ui.copy = baseui.copy # prevent copying repo configuration
448 self.ui.copy = baseui.copy # prevent copying repo configuration
449 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
449 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
450 if (self.ui.configbool('devel', 'all-warnings') or
450 if (self.ui.configbool('devel', 'all-warnings') or
451 self.ui.configbool('devel', 'check-locks')):
451 self.ui.configbool('devel', 'check-locks')):
452 self.vfs.audit = self._getvfsward(self.vfs.audit)
452 self.vfs.audit = self._getvfsward(self.vfs.audit)
453 # A list of callback to shape the phase if no data were found.
453 # A list of callback to shape the phase if no data were found.
454 # Callback are in the form: func(repo, roots) --> processed root.
454 # Callback are in the form: func(repo, roots) --> processed root.
455 # This list it to be filled by extension during repo setup
455 # This list it to be filled by extension during repo setup
456 self._phasedefaults = []
456 self._phasedefaults = []
457 try:
457 try:
458 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
458 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
459 self._loadextensions()
459 self._loadextensions()
460 except IOError:
460 except IOError:
461 pass
461 pass
462
462
463 if featuresetupfuncs:
463 if featuresetupfuncs:
464 self.supported = set(self._basesupported) # use private copy
464 self.supported = set(self._basesupported) # use private copy
465 extmods = set(m.__name__ for n, m
465 extmods = set(m.__name__ for n, m
466 in extensions.extensions(self.ui))
466 in extensions.extensions(self.ui))
467 for setupfunc in featuresetupfuncs:
467 for setupfunc in featuresetupfuncs:
468 if setupfunc.__module__ in extmods:
468 if setupfunc.__module__ in extmods:
469 setupfunc(self.ui, self.supported)
469 setupfunc(self.ui, self.supported)
470 else:
470 else:
471 self.supported = self._basesupported
471 self.supported = self._basesupported
472 color.setup(self.ui)
472 color.setup(self.ui)
473
473
474 # Add compression engines.
474 # Add compression engines.
475 for name in util.compengines:
475 for name in util.compengines:
476 engine = util.compengines[name]
476 engine = util.compengines[name]
477 if engine.revlogheader():
477 if engine.revlogheader():
478 self.supported.add('exp-compression-%s' % name)
478 self.supported.add('exp-compression-%s' % name)
479
479
480 if not self.vfs.isdir():
480 if not self.vfs.isdir():
481 if create:
481 if create:
482 self.requirements = newreporequirements(self)
482 self.requirements = newreporequirements(self)
483
483
484 if not self.wvfs.exists():
484 if not self.wvfs.exists():
485 self.wvfs.makedirs()
485 self.wvfs.makedirs()
486 self.vfs.makedir(notindexed=True)
486 self.vfs.makedir(notindexed=True)
487
487
488 if 'store' in self.requirements:
488 if 'store' in self.requirements:
489 self.vfs.mkdir("store")
489 self.vfs.mkdir("store")
490
490
491 # create an invalid changelog
491 # create an invalid changelog
492 self.vfs.append(
492 self.vfs.append(
493 "00changelog.i",
493 "00changelog.i",
494 '\0\0\0\2' # represents revlogv2
494 '\0\0\0\2' # represents revlogv2
495 ' dummy changelog to prevent using the old repo layout'
495 ' dummy changelog to prevent using the old repo layout'
496 )
496 )
497 else:
497 else:
498 raise error.RepoError(_("repository %s not found") % path)
498 raise error.RepoError(_("repository %s not found") % path)
499 elif create:
499 elif create:
500 raise error.RepoError(_("repository %s already exists") % path)
500 raise error.RepoError(_("repository %s already exists") % path)
501 else:
501 else:
502 try:
502 try:
503 self.requirements = scmutil.readrequires(
503 self.requirements = scmutil.readrequires(
504 self.vfs, self.supported)
504 self.vfs, self.supported)
505 except IOError as inst:
505 except IOError as inst:
506 if inst.errno != errno.ENOENT:
506 if inst.errno != errno.ENOENT:
507 raise
507 raise
508
508
509 cachepath = self.vfs.join('cache')
509 cachepath = self.vfs.join('cache')
510 self.sharedpath = self.path
510 self.sharedpath = self.path
511 try:
511 try:
512 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
512 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
513 if 'relshared' in self.requirements:
513 if 'relshared' in self.requirements:
514 sharedpath = self.vfs.join(sharedpath)
514 sharedpath = self.vfs.join(sharedpath)
515 vfs = vfsmod.vfs(sharedpath, realpath=True)
515 vfs = vfsmod.vfs(sharedpath, realpath=True)
516 cachepath = vfs.join('cache')
516 cachepath = vfs.join('cache')
517 s = vfs.base
517 s = vfs.base
518 if not vfs.exists():
518 if not vfs.exists():
519 raise error.RepoError(
519 raise error.RepoError(
520 _('.hg/sharedpath points to nonexistent directory %s') % s)
520 _('.hg/sharedpath points to nonexistent directory %s') % s)
521 self.sharedpath = s
521 self.sharedpath = s
522 except IOError as inst:
522 except IOError as inst:
523 if inst.errno != errno.ENOENT:
523 if inst.errno != errno.ENOENT:
524 raise
524 raise
525
525
526 if 'exp-sparse' in self.requirements and not sparse.enabled:
526 if 'exp-sparse' in self.requirements and not sparse.enabled:
527 raise error.RepoError(_('repository is using sparse feature but '
527 raise error.RepoError(_('repository is using sparse feature but '
528 'sparse is not enabled; enable the '
528 'sparse is not enabled; enable the '
529 '"sparse" extensions to access'))
529 '"sparse" extensions to access'))
530
530
531 self.store = store.store(
531 self.store = store.store(
532 self.requirements, self.sharedpath,
532 self.requirements, self.sharedpath,
533 lambda base: vfsmod.vfs(base, cacheaudited=True))
533 lambda base: vfsmod.vfs(base, cacheaudited=True))
534 self.spath = self.store.path
534 self.spath = self.store.path
535 self.svfs = self.store.vfs
535 self.svfs = self.store.vfs
536 self.sjoin = self.store.join
536 self.sjoin = self.store.join
537 self.vfs.createmode = self.store.createmode
537 self.vfs.createmode = self.store.createmode
538 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
538 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
539 self.cachevfs.createmode = self.store.createmode
539 self.cachevfs.createmode = self.store.createmode
540 if (self.ui.configbool('devel', 'all-warnings') or
540 if (self.ui.configbool('devel', 'all-warnings') or
541 self.ui.configbool('devel', 'check-locks')):
541 self.ui.configbool('devel', 'check-locks')):
542 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
542 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
543 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
543 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
544 else: # standard vfs
544 else: # standard vfs
545 self.svfs.audit = self._getsvfsward(self.svfs.audit)
545 self.svfs.audit = self._getsvfsward(self.svfs.audit)
546 self._applyopenerreqs()
546 self._applyopenerreqs()
547 if create:
547 if create:
548 self._writerequirements()
548 self._writerequirements()
549
549
550 self._dirstatevalidatewarned = False
550 self._dirstatevalidatewarned = False
551
551
552 self._branchcaches = {}
552 self._branchcaches = {}
553 self._revbranchcache = None
553 self._revbranchcache = None
554 self._filterpats = {}
554 self._filterpats = {}
555 self._datafilters = {}
555 self._datafilters = {}
556 self._transref = self._lockref = self._wlockref = None
556 self._transref = self._lockref = self._wlockref = None
557
557
558 # A cache for various files under .hg/ that tracks file changes,
558 # A cache for various files under .hg/ that tracks file changes,
559 # (used by the filecache decorator)
559 # (used by the filecache decorator)
560 #
560 #
561 # Maps a property name to its util.filecacheentry
561 # Maps a property name to its util.filecacheentry
562 self._filecache = {}
562 self._filecache = {}
563
563
564 # hold sets of revision to be filtered
564 # hold sets of revision to be filtered
565 # should be cleared when something might have changed the filter value:
565 # should be cleared when something might have changed the filter value:
566 # - new changesets,
566 # - new changesets,
567 # - phase change,
567 # - phase change,
568 # - new obsolescence marker,
568 # - new obsolescence marker,
569 # - working directory parent change,
569 # - working directory parent change,
570 # - bookmark changes
570 # - bookmark changes
571 self.filteredrevcache = {}
571 self.filteredrevcache = {}
572
572
573 # post-dirstate-status hooks
573 # post-dirstate-status hooks
574 self._postdsstatus = []
574 self._postdsstatus = []
575
575
576 # generic mapping between names and nodes
576 # generic mapping between names and nodes
577 self.names = namespaces.namespaces()
577 self.names = namespaces.namespaces()
578
578
579 # Key to signature value.
579 # Key to signature value.
580 self._sparsesignaturecache = {}
580 self._sparsesignaturecache = {}
581 # Signature to cached matcher instance.
581 # Signature to cached matcher instance.
582 self._sparsematchercache = {}
582 self._sparsematchercache = {}
583
583
584 def _getvfsward(self, origfunc):
584 def _getvfsward(self, origfunc):
585 """build a ward for self.vfs"""
585 """build a ward for self.vfs"""
586 rref = weakref.ref(self)
586 rref = weakref.ref(self)
587 def checkvfs(path, mode=None):
587 def checkvfs(path, mode=None):
588 ret = origfunc(path, mode=mode)
588 ret = origfunc(path, mode=mode)
589 repo = rref()
589 repo = rref()
590 if (repo is None
590 if (repo is None
591 or not util.safehasattr(repo, '_wlockref')
591 or not util.safehasattr(repo, '_wlockref')
592 or not util.safehasattr(repo, '_lockref')):
592 or not util.safehasattr(repo, '_lockref')):
593 return
593 return
594 if mode in (None, 'r', 'rb'):
594 if mode in (None, 'r', 'rb'):
595 return
595 return
596 if path.startswith(repo.path):
596 if path.startswith(repo.path):
597 # truncate name relative to the repository (.hg)
597 # truncate name relative to the repository (.hg)
598 path = path[len(repo.path) + 1:]
598 path = path[len(repo.path) + 1:]
599 if path.startswith('cache/'):
599 if path.startswith('cache/'):
600 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
600 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
601 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
601 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
602 if path.startswith('journal.'):
602 if path.startswith('journal.'):
603 # journal is covered by 'lock'
603 # journal is covered by 'lock'
604 if repo._currentlock(repo._lockref) is None:
604 if repo._currentlock(repo._lockref) is None:
605 repo.ui.develwarn('write with no lock: "%s"' % path,
605 repo.ui.develwarn('write with no lock: "%s"' % path,
606 stacklevel=2, config='check-locks')
606 stacklevel=2, config='check-locks')
607 elif repo._currentlock(repo._wlockref) is None:
607 elif repo._currentlock(repo._wlockref) is None:
608 # rest of vfs files are covered by 'wlock'
608 # rest of vfs files are covered by 'wlock'
609 #
609 #
610 # exclude special files
610 # exclude special files
611 for prefix in self._wlockfreeprefix:
611 for prefix in self._wlockfreeprefix:
612 if path.startswith(prefix):
612 if path.startswith(prefix):
613 return
613 return
614 repo.ui.develwarn('write with no wlock: "%s"' % path,
614 repo.ui.develwarn('write with no wlock: "%s"' % path,
615 stacklevel=2, config='check-locks')
615 stacklevel=2, config='check-locks')
616 return ret
616 return ret
617 return checkvfs
617 return checkvfs
618
618
619 def _getsvfsward(self, origfunc):
619 def _getsvfsward(self, origfunc):
620 """build a ward for self.svfs"""
620 """build a ward for self.svfs"""
621 rref = weakref.ref(self)
621 rref = weakref.ref(self)
622 def checksvfs(path, mode=None):
622 def checksvfs(path, mode=None):
623 ret = origfunc(path, mode=mode)
623 ret = origfunc(path, mode=mode)
624 repo = rref()
624 repo = rref()
625 if repo is None or not util.safehasattr(repo, '_lockref'):
625 if repo is None or not util.safehasattr(repo, '_lockref'):
626 return
626 return
627 if mode in (None, 'r', 'rb'):
627 if mode in (None, 'r', 'rb'):
628 return
628 return
629 if path.startswith(repo.sharedpath):
629 if path.startswith(repo.sharedpath):
630 # truncate name relative to the repository (.hg)
630 # truncate name relative to the repository (.hg)
631 path = path[len(repo.sharedpath) + 1:]
631 path = path[len(repo.sharedpath) + 1:]
632 if repo._currentlock(repo._lockref) is None:
632 if repo._currentlock(repo._lockref) is None:
633 repo.ui.develwarn('write with no lock: "%s"' % path,
633 repo.ui.develwarn('write with no lock: "%s"' % path,
634 stacklevel=3)
634 stacklevel=3)
635 return ret
635 return ret
636 return checksvfs
636 return checksvfs
637
637
638 def close(self):
638 def close(self):
639 self._writecaches()
639 self._writecaches()
640
640
641 def _loadextensions(self):
641 def _loadextensions(self):
642 extensions.loadall(self.ui)
642 extensions.loadall(self.ui)
643
643
644 def _writecaches(self):
644 def _writecaches(self):
645 if self._revbranchcache:
645 if self._revbranchcache:
646 self._revbranchcache.write()
646 self._revbranchcache.write()
647
647
648 def _restrictcapabilities(self, caps):
648 def _restrictcapabilities(self, caps):
649 if self.ui.configbool('experimental', 'bundle2-advertise'):
649 if self.ui.configbool('experimental', 'bundle2-advertise'):
650 caps = set(caps)
650 caps = set(caps)
651 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
651 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
652 role='client'))
652 role='client'))
653 caps.add('bundle2=' + urlreq.quote(capsblob))
653 caps.add('bundle2=' + urlreq.quote(capsblob))
654 return caps
654 return caps
655
655
656 def _applyopenerreqs(self):
656 def _applyopenerreqs(self):
657 self.svfs.options = dict((r, 1) for r in self.requirements
657 self.svfs.options = dict((r, 1) for r in self.requirements
658 if r in self.openerreqs)
658 if r in self.openerreqs)
659 # experimental config: format.chunkcachesize
659 # experimental config: format.chunkcachesize
660 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
660 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
661 if chunkcachesize is not None:
661 if chunkcachesize is not None:
662 self.svfs.options['chunkcachesize'] = chunkcachesize
662 self.svfs.options['chunkcachesize'] = chunkcachesize
663 # experimental config: format.maxchainlen
663 # experimental config: format.maxchainlen
664 maxchainlen = self.ui.configint('format', 'maxchainlen')
664 maxchainlen = self.ui.configint('format', 'maxchainlen')
665 if maxchainlen is not None:
665 if maxchainlen is not None:
666 self.svfs.options['maxchainlen'] = maxchainlen
666 self.svfs.options['maxchainlen'] = maxchainlen
667 # experimental config: format.manifestcachesize
667 # experimental config: format.manifestcachesize
668 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
668 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
669 if manifestcachesize is not None:
669 if manifestcachesize is not None:
670 self.svfs.options['manifestcachesize'] = manifestcachesize
670 self.svfs.options['manifestcachesize'] = manifestcachesize
671 deltabothparents = self.ui.configbool('storage',
671 deltabothparents = self.ui.configbool('storage',
672 'revlog.optimize-delta-parent-choice')
672 'revlog.optimize-delta-parent-choice')
673 self.svfs.options['deltabothparents'] = deltabothparents
673 self.svfs.options['deltabothparents'] = deltabothparents
674 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
674 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
675 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
675 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
676 if 0 <= chainspan:
676 if 0 <= chainspan:
677 self.svfs.options['maxdeltachainspan'] = chainspan
677 self.svfs.options['maxdeltachainspan'] = chainspan
678 mmapindexthreshold = self.ui.configbytes('experimental',
678 mmapindexthreshold = self.ui.configbytes('experimental',
679 'mmapindexthreshold')
679 'mmapindexthreshold')
680 if mmapindexthreshold is not None:
680 if mmapindexthreshold is not None:
681 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
681 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
682 withsparseread = self.ui.configbool('experimental', 'sparse-read')
682 withsparseread = self.ui.configbool('experimental', 'sparse-read')
683 srdensitythres = float(self.ui.config('experimental',
683 srdensitythres = float(self.ui.config('experimental',
684 'sparse-read.density-threshold'))
684 'sparse-read.density-threshold'))
685 srmingapsize = self.ui.configbytes('experimental',
685 srmingapsize = self.ui.configbytes('experimental',
686 'sparse-read.min-gap-size')
686 'sparse-read.min-gap-size')
687 self.svfs.options['with-sparse-read'] = withsparseread
687 self.svfs.options['with-sparse-read'] = withsparseread
688 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
688 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
689 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
689 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            self.svfs.options['generaldelta'] = True

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

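    # Illustration (editor's sketch, not part of the original file): with a
    # working copy whose .hgsub maps 'sub' to a subrepository, the prefix
    # walk above resolves candidates from the longest prefix downwards:
    #
    #     repo._checknested(repo.root + '/sub')        # True
    #     repo._checknested(repo.root + '/sub/inner')  # delegated to
    #                                                  # sub.checknested('inner')
    #     repo._checknested(repo.root + '/unrelated')  # False
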
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

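    # Usage sketch (illustrative; 'visible' is a standard repoview filter
    # name):
    #
    #     visible = repo.filtered('visible')  # hides hidden (e.g. obsolete) revs
    #     assert visible.unfiltered() is repo.unfiltered()
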
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

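    # Usage sketch (illustrative): the narrow matcher is callable on
    # repo-relative paths, and is matchmod.always() for non-narrow repos.
    #
    #     m = repo.narrowmatch()
    #     if m('src/main.c'):  # hypothetical path
    #         pass             # file is inside the narrowspec
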
    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

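    # Usage sketch of the lookup forms handled above (illustrative):
    #
    #     repo[None]    # workingctx for the working directory
    #     repo['tip']   # changectx from a symbol, node, or revision
    #     repo[0:5]     # list of changectxs, skipping filtered revisions
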
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

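    # Usage sketch (illustrative): arguments are escaped by
    # revsetlang.formatspec, so callers never quote by hand.
    #
    #     repo.revs('ancestors(%d)', 42)
    #     repo.revs('%ld and not public()', [10, 11, 12])
    #     repo.revs('branch(%s)', 'default')
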
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

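    # Usage sketch (illustrative; 'myalias' is a hypothetical alias name):
    # expand user aliases while overriding one of them locally.
    #
    #     repo.anyrevs(['myalias()'], user=True,
    #                  localalias={'myalias': 'heads(default)'})
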
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

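    # Usage sketch (illustrative): keyword arguments are exposed as HG_*
    # environment variables to shell hooks; throw=True raises on a failing
    # hook.
    #
    #     repo.hook('pretxncommit', throw=True, node=hexnode)  # 'hexnode' is
    #                                                          # hypothetical
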
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

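    # Usage sketch (illustrative): the mapping is tag name -> binary node;
    # 'tip' is always present (see _findtags below).
    #
    #     for name, node in sorted(repo.tags().iteritems()):
    #         repo.ui.write('%s %s\n' % (name, hex(node)))
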
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

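    # Usage sketch (illustrative): heads are binary nodes ordered by
    # increasing revision, so the last entry is the tipmost head.
    #
    #     heads = repo.branchmap()['default']
    #     tipmost = heads[-1]
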
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it, since it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

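    # Configuration sketch (illustrative hgrc; the patterns and commands are
    # hypothetical) feeding _loadfilter/_filter above. 'tempfile:' is one of
    # the standard command-filter prefixes:
    #
    #     [encode]
    #     **.txt = tempfile: dos2unix -n INFILE OUTFILE
    #
    #     [decode]
    #     **.txt = tempfile: unix2dos -n INFILE OUTFILE
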
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

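    # Usage sketch (illustrative): the flags argument selects how data
    # lands in the working directory.
    #
    #     repo.wwrite('a.txt', data, '')     # regular file
    #     repo.wwrite('run.sh', data, 'x')   # executable bit set
    #     repo.wwrite('lnk', 'target', 'l')  # symlink pointing at 'target'
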
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
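        # Parsing sketch for the record format above (editor's illustration,
        # not part of Mercurial's API):
        #
        #     with repo.vfs('changes/tags.changes', 'rb') as fp:
        #         for line in fp:
        #             action, hexnode, tag = line.rstrip('\n').split(' ', 2)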
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = pycompat.xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful; will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

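    # Usage sketch (illustrative): a transaction requires the store lock
    # and can be driven as a context manager, aborting on exceptions.
    #
    #     with repo.lock():
    #         with repo.transaction('my-operation') as tr:  # hypothetical desc
    #             pass  # mutate the store here
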
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

1578 def _buildcacheupdater(self, newtransaction):
1579 """called during transaction to build the callback updating cache
1580
1581 Lives on the repository to help extensions that might want to augment
1582 this logic. For this purpose, the created transaction is passed to the
1583 method.
1584 """
1585 # we must avoid cyclic reference between repo and transaction.
1586 reporef = weakref.ref(self)
1587 def updater(tr):
1588 repo = reporef()
1589 repo.updatecaches(tr)
1590 return updater
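# Editorial sketch (not part of the original source): the docstring above
# says extensions may augment this logic; one plausible way is wrapping the
# method. 'myextcache' is a hypothetical extension-owned cache object.
#
#     from mercurial import extensions, localrepo
#
#     def _wrapped(orig, self, newtransaction):
#         updater = orig(self, newtransaction)
#         def extupdater(tr):
#             updater(tr)           # keep the stock cache warming
#             myextcache.refresh()  # hypothetical extension cache
#         return extupdater
#     extensions.wrapfunction(localrepo.localrepository,
#                             '_buildcacheupdater', _wrapped)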
1591
1592 @unfilteredmethod
1593 def updatecaches(self, tr=None, full=False):
1594 """warm appropriate caches
1595
1596 If this function is called after a transaction has closed, the
1597 transaction will be available in the 'tr' argument. This can be used
1598 to selectively update caches relevant to the changes in that transaction.
1599
1600 If 'full' is set, make sure all caches the function knows about have
1601 up-to-date data, even the ones usually loaded more lazily.
1602 """
1603 if tr is not None and tr.hookargs.get('source') == 'strip':
1604 # During strip, many caches are invalid but a
1605 # later call to `destroyed` will refresh them.
1606 return
1607
1608 if tr is None or tr.changes['revs']:
1609 # updating the unfiltered branchmap should refresh all the others.
1610 self.ui.debug('updating the branch cache\n')
1611 branchmap.updatecache(self.filtered('served'))
1612
1613 if full:
1614 rbc = self.revbranchcache()
1615 for r in self.changelog:
1616 rbc.branchinfo(r)
1617 rbc.write()
1618
1619 # ensure the working copy parents are in the manifestfulltextcache
1620 for ctx in self['.'].parents():
1621 ctx.manifest() # accessing the manifest is enough
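# Editorial note: a full warm-up can be triggered from the command line with
# 'hg debugupdatecaches' (which, to our understanding, calls this method with
# full=True); an in-process sketch, assuming a loaded 'repo':
#
#     with repo.wlock(), repo.lock():
#         repo.updatecaches(full=True)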
1622
1623 def invalidatecaches(self):
1624
1625 if '_tagscache' in vars(self):
1626 # can't use delattr on proxy
1627 del self.__dict__['_tagscache']
1628
1629 self.unfiltered()._branchcaches.clear()
1630 self.invalidatevolatilesets()
1631 self._sparsesignaturecache.clear()
1632
1633 def invalidatevolatilesets(self):
1634 self.filteredrevcache.clear()
1635 obsolete.clearobscaches(self)
1636
1637 def invalidatedirstate(self):
1638 '''Invalidates the dirstate, causing the next call to dirstate
1639 to check if it was modified since the last time it was read,
1640 rereading it if it has.
1641
1642 This differs from dirstate.invalidate() in that it doesn't always
1643 reread the dirstate. Use dirstate.invalidate() if you want to
1644 explicitly read the dirstate again (i.e. restore it to a previously
1645 known good state).'''
1646 if hasunfilteredcache(self, 'dirstate'):
1647 for k in self.dirstate._filecache:
1648 try:
1649 delattr(self.dirstate, k)
1650 except AttributeError:
1651 pass
1652 delattr(self.unfiltered(), 'dirstate')
1653
1654 def invalidate(self, clearfilecache=False):
1655 '''Invalidates both store and non-store parts other than dirstate
1656
1657 If a transaction is running, invalidation of the store is omitted,
1658 because discarding in-memory changes might cause inconsistency
1659 (e.g. an incomplete fncache causes unintentional failure, but a
1660 redundant one doesn't).
1661 '''
1662 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1663 for k in list(self._filecache.keys()):
1664 # dirstate is invalidated separately in invalidatedirstate()
1665 if k == 'dirstate':
1666 continue
1667 if (k == 'changelog' and
1668 self.currenttransaction() and
1669 self.changelog._delayed):
1670 # The changelog object may store unwritten revisions. We don't
1671 # want to lose them.
1672 # TODO: Solve the problem instead of working around it.
1673 continue
1674
1675 if clearfilecache:
1676 del self._filecache[k]
1677 try:
1678 delattr(unfiltered, k)
1679 except AttributeError:
1680 pass
1681 self.invalidatecaches()
1682 if not self.currenttransaction():
1683 # TODO: Changing contents of store outside transaction
1684 # causes inconsistency. We should make in-memory store
1685 # changes detectable, and abort if changed.
1686 self.store.invalidatecaches()
1687
1688 def invalidateall(self):
1689 '''Fully invalidates both store and non-store parts, causing the
1690 subsequent operation to reread any outside changes.'''
1691 # extensions should hook this to invalidate their caches
1692 self.invalidate()
1693 self.invalidatedirstate()
1694
1695 @unfilteredmethod
1696 def _refreshfilecachestats(self, tr):
1697 """Reload stats of cached files so that they are flagged as valid"""
1698 for k, ce in self._filecache.items():
1699 k = pycompat.sysstr(k)
1700 if k == r'dirstate' or k not in self.__dict__:
1701 continue
1702 ce.refresh()
1703
1704 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1705 inheritchecker=None, parentenvvar=None):
1706 parentlock = None
1707 # the contents of parentenvvar are used by the underlying lock to
1708 # determine whether it can be inherited
1709 if parentenvvar is not None:
1710 parentlock = encoding.environ.get(parentenvvar)
1711
1712 timeout = 0
1713 warntimeout = 0
1714 if wait:
1715 timeout = self.ui.configint("ui", "timeout")
1716 warntimeout = self.ui.configint("ui", "timeout.warn")
1717 # internal config: ui.signal-safe-lock
1718 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1719
1720 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1721 releasefn=releasefn,
1722 acquirefn=acquirefn, desc=desc,
1723 inheritchecker=inheritchecker,
1724 parentlock=parentlock,
1725 signalsafe=signalsafe)
1726 return l
1727
1728 def _afterlock(self, callback):
1729 """add a callback to be run when the repository is fully unlocked
1730
1731 The callback will be executed when the outermost lock is released
1732 (with wlock being higher level than 'lock')."""
1733 for ref in (self._wlockref, self._lockref):
1734 l = ref and ref()
1735 if l and l.held:
1736 l.postrelease.append(callback)
1737 break
1738 else: # no lock has been found.
1739 callback()
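# Editorial sketch: deferring work until the outermost lock is released;
# 'notify' is a hypothetical callback (compare the real commithook/runhook
# usages further down in this file).
#
#     def notify():
#         repo.ui.status('all locks released\n')
#     repo._afterlock(notify)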
1740
1741 def lock(self, wait=True):
1742 '''Lock the repository store (.hg/store) and return a weak reference
1743 to the lock. Use this before modifying the store (e.g. committing or
1744 stripping). If you are opening a transaction, get a lock as well.
1745
1746 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1747 'wlock' first to avoid a deadlock hazard.'''
1748 l = self._currentlock(self._lockref)
1749 if l is not None:
1750 l.lock()
1751 return l
1752
1753 l = self._lock(self.svfs, "lock", wait, None,
1754 self.invalidate, _('repository %s') % self.origroot)
1755 self._lockref = weakref.ref(l)
1756 return l
1757
1758 def _wlockchecktransaction(self):
1759 if self.currenttransaction() is not None:
1760 raise error.LockInheritanceContractViolation(
1761 'wlock cannot be inherited in the middle of a transaction')
1762
1763 def wlock(self, wait=True):
1764 '''Lock the non-store parts of the repository (everything under
1765 .hg except .hg/store) and return a weak reference to the lock.
1766
1767 Use this before modifying files in .hg.
1768
1769 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1770 'wlock' first to avoid a deadlock hazard.'''
1771 l = self._wlockref and self._wlockref()
1772 if l is not None and l.held:
1773 l.lock()
1774 return l
1775
1776 # We do not need to check for non-waiting lock acquisitions. Such
1777 # acquisitions would not cause a deadlock, as they would just fail.
1778 if wait and (self.ui.configbool('devel', 'all-warnings')
1779 or self.ui.configbool('devel', 'check-locks')):
1780 if self._currentlock(self._lockref) is not None:
1781 self.ui.develwarn('"wlock" acquired after "lock"')
1782
1783 def unlock():
1784 if self.dirstate.pendingparentchange():
1785 self.dirstate.invalidate()
1786 else:
1787 self.dirstate.write(None)
1788
1789 self._filecache['dirstate'].refresh()
1790
1791 l = self._lock(self.vfs, "wlock", wait, unlock,
1792 self.invalidatedirstate, _('working directory of %s') %
1793 self.origroot,
1794 inheritchecker=self._wlockchecktransaction,
1795 parentenvvar='HG_WLOCK_LOCKER')
1796 self._wlockref = weakref.ref(l)
1797 return l
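# Editorial sketch of the lock order documented above (wlock strictly before
# lock), assuming a loaded 'repo' and the module-level release() helper:
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         # ... modify .hg and the store ...
#     finally:
#         release(lock, wlock)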
1798
1799 def _currentlock(self, lockref):
1800 """Returns the lock if it's held, or None if it's not."""
1801 if lockref is None:
1802 return None
1803 l = lockref()
1804 if l is None or not l.held:
1805 return None
1806 return l
1807
1808 def currentwlock(self):
1809 """Returns the wlock if it's held, or None if it's not."""
1810 return self._currentlock(self._wlockref)
1811
1812 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1813 """
1814 commit an individual file as part of a larger transaction
1815 """
1816
1817 fname = fctx.path()
1818 fparent1 = manifest1.get(fname, nullid)
1819 fparent2 = manifest2.get(fname, nullid)
1820 if isinstance(fctx, context.filectx):
1821 node = fctx.filenode()
1822 if node in [fparent1, fparent2]:
1823 self.ui.debug('reusing %s filelog entry\n' % fname)
1824 if manifest1.flags(fname) != fctx.flags():
1825 changelist.append(fname)
1826 return node
1827
1828 flog = self.file(fname)
1829 meta = {}
1830 copy = fctx.renamed()
1831 if copy and copy[0] != fname:
1832 # Mark the new revision of this file as a copy of another
1833 # file. This copy data will effectively act as a parent
1834 # of this new revision. If this is a merge, the first
1835 # parent will be the nullid (meaning "look up the copy data")
1836 # and the second one will be the other parent. For example:
1837 #
1838 # 0 --- 1 --- 3 rev1 changes file foo
1839 # \ / rev2 renames foo to bar and changes it
1840 # \- 2 -/ rev3 should have bar with all changes and
1841 # should record that bar descends from
1842 # bar in rev2 and foo in rev1
1843 #
1844 # this allows this merge to succeed:
1845 #
1846 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1847 # \ / merging rev3 and rev4 should use bar@rev2
1848 # \- 2 --- 4 as the merge base
1849 #
1850
1851 cfname = copy[0]
1852 crev = manifest1.get(cfname)
1853 newfparent = fparent2
1854
1855 if manifest2: # branch merge
1856 if fparent2 == nullid or crev is None: # copied on remote side
1857 if cfname in manifest2:
1858 crev = manifest2[cfname]
1859 newfparent = fparent1
1860
1861 # Here, we used to search backwards through history to try to find
1862 # where the file copy came from if the source of a copy was not in
1863 # the parent directory. However, this doesn't actually make sense to
1864 # do (what does a copy from something not in your working copy even
1865 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1866 # the user that copy information was dropped, so if they didn't
1867 # expect this outcome it can be fixed, but this is the correct
1868 # behavior in this circumstance.
1869
1870 if crev:
1871 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1872 meta["copy"] = cfname
1873 meta["copyrev"] = hex(crev)
1874 fparent1, fparent2 = nullid, newfparent
1875 else:
1876 self.ui.warn(_("warning: can't find ancestor for '%s' "
1877 "copied from '%s'!\n") % (fname, cfname))
1878
1879 elif fparent1 == nullid:
1880 fparent1, fparent2 = fparent2, nullid
1881 elif fparent2 != nullid:
1882 # is one parent an ancestor of the other?
1883 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1884 if fparent1 in fparentancestors:
1885 fparent1, fparent2 = fparent2, nullid
1886 elif fparent2 in fparentancestors:
1887 fparent2 = nullid
1888
1889 # is the file changed?
1890 text = fctx.data()
1891 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1892 changelist.append(fname)
1893 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1894 # are just the flags changed during merge?
1895 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1896 changelist.append(fname)
1897
1898 return fparent1
1899
1900 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1901 """check for commit arguments that aren't committable"""
1902 if match.isexact() or match.prefix():
1903 matched = set(status.modified + status.added + status.removed)
1904
1905 for f in match.files():
1906 f = self.dirstate.normalize(f)
1907 if f == '.' or f in matched or f in wctx.substate:
1908 continue
1909 if f in status.deleted:
1910 fail(f, _('file not found!'))
1911 if f in vdirs: # visited directory
1912 d = f + '/'
1913 for mf in matched:
1914 if mf.startswith(d):
1915 break
1916 else:
1917 fail(f, _("no match under directory!"))
1918 elif f not in self.dirstate:
1919 fail(f, _("file not tracked!"))
1920
1921 @unfilteredmethod
1922 def commit(self, text="", user=None, date=None, match=None, force=False,
1923 editor=False, extra=None):
1924 """Add a new revision to current repository.
1924 """Add a new revision to current repository.
1925
1925
1926 Revision information is gathered from the working directory,
1926 Revision information is gathered from the working directory,
1927 match can be used to filter the committed files. If editor is
1927 match can be used to filter the committed files. If editor is
1928 supplied, it is called to get a commit message.
1928 supplied, it is called to get a commit message.
1929 """
1929 """
1930 if extra is None:
1931 extra = {}
1932
1933 def fail(f, msg):
1934 raise error.Abort('%s: %s' % (f, msg))
1935
1936 if not match:
1937 match = matchmod.always(self.root, '')
1938
1939 if not force:
1940 vdirs = []
1941 match.explicitdir = vdirs.append
1942 match.bad = fail
1943
1944 wlock = lock = tr = None
1945 try:
1946 wlock = self.wlock()
1947 lock = self.lock() # for recent changelog (see issue4368)
1948
1949 wctx = self[None]
1950 merge = len(wctx.parents()) > 1
1951
1952 if not force and merge and not match.always():
1953 raise error.Abort(_('cannot partially commit a merge '
1954 '(do not specify files or patterns)'))
1955
1956 status = self.status(match=match, clean=force)
1957 if force:
1958 status.modified.extend(status.clean) # mq may commit clean files
1959
1960 # check subrepos
1961 subs, commitsubs, newstate = subrepoutil.precommit(
1962 self.ui, wctx, status, match, force=force)
1963
1964 # make sure all explicit patterns are matched
1965 if not force:
1966 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1967
1968 cctx = context.workingcommitctx(self, status,
1969 text, user, date, extra)
1970
1971 # internal config: ui.allowemptycommit
1972 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1973 or extra.get('close') or merge or cctx.files()
1974 or self.ui.configbool('ui', 'allowemptycommit'))
1975 if not allowemptycommit:
1976 return None
1977
1978 if merge and cctx.deleted():
1979 raise error.Abort(_("cannot commit merge with missing files"))
1980
1981 ms = mergemod.mergestate.read(self)
1982 mergeutil.checkunresolved(ms)
1983
1984 if editor:
1985 cctx._text = editor(self, cctx, subs)
1986 edited = (text != cctx._text)
1987
1988 # Save commit message in case this transaction gets rolled back
1989 # (e.g. by a pretxncommit hook). Leave the content alone on
1990 # the assumption that the user will use the same editor again.
1991 msgfn = self.savecommitmessage(cctx._text)
1992
1993 # commit subs and write new state
1994 if subs:
1995 for s in sorted(commitsubs):
1996 sub = wctx.sub(s)
1997 self.ui.status(_('committing subrepository %s\n') %
1998 subrepoutil.subrelpath(sub))
1999 sr = sub.commit(cctx._text, user, date)
2000 newstate[s] = (newstate[s][0], sr)
2001 subrepoutil.writestate(self, newstate)
2002
2003 p1, p2 = self.dirstate.parents()
2004 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2005 try:
2006 self.hook("precommit", throw=True, parent1=hookp1,
2007 parent2=hookp2)
2008 tr = self.transaction('commit')
2009 ret = self.commitctx(cctx, True)
2010 except: # re-raises
2011 if edited:
2012 self.ui.write(
2013 _('note: commit message saved in %s\n') % msgfn)
2014 raise
2015 # update bookmarks, dirstate and mergestate
2016 bookmarks.update(self, [p1, p2], ret)
2017 cctx.markcommitted(ret)
2018 ms.reset()
2019 tr.close()
2020
2021 finally:
2022 lockmod.release(tr, lock, wlock)
2023
2024 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2025 # hack for commands that use a temporary commit (e.g. histedit):
2026 # the temporary commit may have been stripped before the hook runs
2027 if self.changelog.hasnode(ret):
2028 self.hook("commit", node=node, parent1=parent1,
2029 parent2=parent2)
2030 self._afterlock(commithook)
2031 return ret
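# Editorial sketch of the commit() entry point (values are illustrative):
#
#     node = repo.commit(text='fix frobnication',
#                        user='alice <alice@example.com>')
#     if node is None:
#         repo.ui.status('nothing changed\n')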
2032
2033 @unfilteredmethod
2034 def commitctx(self, ctx, error=False):
2035 """Add a new revision to current repository.
2035 """Add a new revision to current repository.
2036 Revision information is passed via the context argument.
2036 Revision information is passed via the context argument.
2037 """
2037 """
2038
2039 tr = None
2040 p1, p2 = ctx.p1(), ctx.p2()
2041 user = ctx.user()
2042
2043 lock = self.lock()
2044 try:
2045 tr = self.transaction("commit")
2046 trp = weakref.proxy(tr)
2047
2048 if ctx.manifestnode():
2049 # reuse an existing manifest revision
2050 mn = ctx.manifestnode()
2051 files = ctx.files()
2052 elif ctx.files():
2053 m1ctx = p1.manifestctx()
2054 m2ctx = p2.manifestctx()
2055 mctx = m1ctx.copy()
2056
2057 m = mctx.read()
2058 m1 = m1ctx.read()
2059 m2 = m2ctx.read()
2060
2061 # check in files
2062 added = []
2063 changed = []
2064 removed = list(ctx.removed())
2065 linkrev = len(self)
2066 self.ui.note(_("committing files:\n"))
2067 for f in sorted(ctx.modified() + ctx.added()):
2068 self.ui.note(f + "\n")
2069 try:
2070 fctx = ctx[f]
2071 if fctx is None:
2072 removed.append(f)
2073 else:
2074 added.append(f)
2075 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2076 trp, changed)
2077 m.setflag(f, fctx.flags())
2078 except OSError as inst:
2079 self.ui.warn(_("trouble committing %s!\n") % f)
2080 raise
2081 except IOError as inst:
2082 errcode = getattr(inst, 'errno', errno.ENOENT)
2083 if error or errcode and errcode != errno.ENOENT:
2084 self.ui.warn(_("trouble committing %s!\n") % f)
2085 raise
2086
2087 # update manifest
2088 self.ui.note(_("committing manifest\n"))
2089 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2090 drop = [f for f in removed if f in m]
2091 for f in drop:
2092 del m[f]
2093 mn = mctx.write(trp, linkrev,
2094 p1.manifestnode(), p2.manifestnode(),
2095 added, drop)
2096 files = changed + removed
2097 else:
2098 mn = p1.manifestnode()
2099 files = []
2100
2101 # update changelog
2102 self.ui.note(_("committing changelog\n"))
2103 self.changelog.delayupdate(tr)
2104 n = self.changelog.add(mn, files, ctx.description(),
2105 trp, p1.node(), p2.node(),
2106 user, ctx.date(), ctx.extra().copy())
2107 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2108 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2109 parent2=xp2)
2110 # set the new commit in its proper phase
2111 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2112 if targetphase:
2113 # retracting the boundary does not alter parent changesets.
2114 # if a parent has a higher phase, the resulting phase will
2115 # be compliant anyway
2116 #
2117 # if the minimal phase was 0 we don't need to retract anything
2118 phases.registernew(self, tr, targetphase, [n])
2119 tr.close()
2120 return n
2121 finally:
2122 if tr:
2123 tr.release()
2124 lock.release()
2125
2126 @unfilteredmethod
2127 def destroying(self):
2128 '''Inform the repository that nodes are about to be destroyed.
2129 Intended for use by strip and rollback, so there's a common
2130 place for anything that has to be done before destroying history.
2131
2132 This is mostly useful for saving state that is in memory and waiting
2133 to be flushed when the current lock is released. Because a call to
2134 destroyed is imminent, the repo will be invalidated causing those
2135 changes to stay in memory (waiting for the next unlock), or vanish
2136 completely.
2137 '''
2138 # When using the same lock to commit and strip, the phasecache is left
2139 # dirty after committing. Then when we strip, the repo is invalidated,
2140 # causing those changes to disappear.
2141 if '_phasecache' in vars(self):
2142 self._phasecache.write()
2143
2144 @unfilteredmethod
2145 def destroyed(self):
2146 '''Inform the repository that nodes have been destroyed.
2147 Intended for use by strip and rollback, so there's a common
2148 place for anything that has to be done after destroying history.
2149 '''
2150 # When one tries to:
2151 # 1) destroy nodes thus calling this method (e.g. strip)
2152 # 2) use phasecache somewhere (e.g. commit)
2153 #
2154 # then 2) will fail because the phasecache contains nodes that were
2155 # removed. We can either remove phasecache from the filecache,
2156 # causing it to reload next time it is accessed, or simply filter
2157 # the removed nodes now and write the updated cache.
2158 self._phasecache.filterunknown(self)
2159 self._phasecache.write()
2160
2161 # refresh all repository caches
2162 self.updatecaches()
2163
2164 # Ensure the persistent tag cache is updated. Doing it now
2165 # means that the tag cache only has to worry about destroyed
2166 # heads immediately after a strip/rollback. That in turn
2167 # guarantees that "cachetip == currenttip" (comparing both rev
2168 # and node) always means no nodes have been added or destroyed.
2169
2170 # XXX this is suboptimal when qrefresh'ing: we strip the current
2171 # head, refresh the tag cache, then immediately add a new head.
2172 # But I think doing it this way is necessary for the "instant
2173 # tag cache retrieval" case to work.
2174 self.invalidate()
2175
2176 def status(self, node1='.', node2=None, match=None,
2177 ignored=False, clean=False, unknown=False,
2178 listsubrepos=False):
2179 '''a convenience method that calls node1.status(node2)'''
2180 return self[node1].status(node2, match, ignored, clean, unknown,
2181 listsubrepos)
2182
2183 def addpostdsstatus(self, ps):
2184 """Add a callback to run within the wlock, at the point at which status
2185 fixups happen.
2186
2187 On status completion, callback(wctx, status) will be called with the
2188 wlock held, unless the dirstate has changed from underneath or the wlock
2189 couldn't be grabbed.
2190
2191 Callbacks should not capture and use a cached copy of the dirstate --
2192 it might change in the meanwhile. Instead, they should access the
2193 dirstate via wctx.repo().dirstate.
2194
2195 This list is emptied out after each status run -- extensions should
2196 make sure they add to this list each time dirstate.status is called.
2197 Extensions should also make sure they don't call this for statuses
2198 that don't involve the dirstate.
2199 """
2199 """
2200
2200
2201 # The list is located here for uniqueness reasons -- it is actually
2201 # The list is located here for uniqueness reasons -- it is actually
2202 # managed by the workingctx, but that isn't unique per-repo.
2202 # managed by the workingctx, but that isn't unique per-repo.
2203 self._postdsstatus.append(ps)
2203 self._postdsstatus.append(ps)
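# Editorial sketch: a post-dirstate-status callback matching the documented
# callback(wctx, status) signature; the registration shown is hypothetical
# but mirrors how the fsmonitor extension uses this hook.
#
#     def poststatus(wctx, status):
#         wctx.repo().ui.debug('status fixups ran\n')
#     repo.addpostdsstatus(poststatus)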
2204
2205 def postdsstatus(self):
2206 """Used by workingctx to get the list of post-dirstate-status hooks."""
2207 return self._postdsstatus
2208
2209 def clearpostdsstatus(self):
2210 """Used by workingctx to clear post-dirstate-status hooks."""
2211 del self._postdsstatus[:]
2212
2213 def heads(self, start=None):
2214 if start is None:
2215 cl = self.changelog
2216 headrevs = reversed(cl.headrevs())
2217 return [cl.node(rev) for rev in headrevs]
2218
2219 heads = self.changelog.heads(start)
2220 # sort the output in rev descending order
2221 return sorted(heads, key=self.changelog.rev, reverse=True)
2222
2223 def branchheads(self, branch=None, start=None, closed=False):
2224 '''return a (possibly filtered) list of heads for the given branch
2225
2226 Heads are returned in topological order, from newest to oldest.
2227 If branch is None, use the dirstate branch.
2228 If start is not None, return only heads reachable from start.
2229 If closed is True, return heads that are marked as closed as well.
2230 '''
2231 if branch is None:
2232 branch = self[None].branch()
2233 branches = self.branchmap()
2234 if branch not in branches:
2235 return []
2236 # the cache returns heads ordered lowest to highest
2237 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2238 if start is not None:
2239 # filter out the heads that cannot be reached from startrev
2240 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2241 bheads = [h for h in bheads if h in fbheads]
2242 return bheads
2243
2244 def branches(self, nodes):
2245 if not nodes:
2246 nodes = [self.changelog.tip()]
2247 b = []
2248 for n in nodes:
2249 t = n
2250 while True:
2251 p = self.changelog.parents(n)
2252 if p[1] != nullid or p[0] == nullid:
2253 b.append((t, n, p[0], p[1]))
2254 break
2255 n = p[0]
2256 return b
2257
2258 def between(self, pairs):
2259 r = []
2260
2261 for top, bottom in pairs:
2262 n, l, i = top, [], 0
2263 f = 1
2264
2265 while n != bottom and n != nullid:
2266 p = self.changelog.parents(n)[0]
2267 if i == f:
2268 l.append(n)
2269 f = f * 2
2270 n = p
2271 i += 1
2272
2273 r.append(l)
2274
2275 return r
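# Editorial note: the loop above samples the first-parent chain at
# exponentially growing distances -- for each (top, bottom) pair it records
# the ancestors 1, 2, 4, 8, ... steps below 'top', which keeps the reply to
# the legacy 'between' wire command logarithmic in the length of the chain.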
2276
2277 def checkpush(self, pushop):
2278 """Extensions can override this function if additional checks have
2279 to be performed before pushing, or call it if they override the push
2280 command.
2281 """
2282
2283 @unfilteredpropertycache
2284 def prepushoutgoinghooks(self):
2285 """Return util.hooks consists of a pushop with repo, remote, outgoing
2285 """Return util.hooks consists of a pushop with repo, remote, outgoing
2286 methods, which are called before pushing changesets.
2286 methods, which are called before pushing changesets.
2287 """
2287 """
2288 return util.hooks()
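# Editorial sketch: registering a pre-push check; exchange.py invokes these
# hooks with the pushop, so a hook could look roughly like:
#
#     def checkoutgoing(pushop):
#         if pushop.outgoing.missing:
#             pushop.repo.ui.status('pushing %d changesets\n'
#                                   % len(pushop.outgoing.missing))
#     repo.prepushoutgoinghooks.add('myextension', checkoutgoing)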
2289
2290 def pushkey(self, namespace, key, old, new):
2291 try:
2292 tr = self.currenttransaction()
2293 hookargs = {}
2294 if tr is not None:
2295 hookargs.update(tr.hookargs)
2296 hookargs = pycompat.strkwargs(hookargs)
2297 hookargs[r'namespace'] = namespace
2298 hookargs[r'key'] = key
2299 hookargs[r'old'] = old
2300 hookargs[r'new'] = new
2301 self.hook('prepushkey', throw=True, **hookargs)
2302 except error.HookAbort as exc:
2303 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2304 if exc.hint:
2305 self.ui.write_err(_("(%s)\n") % exc.hint)
2306 return False
2307 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2308 ret = pushkey.push(self, namespace, key, old, new)
2309 def runhook():
2310 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2311 ret=ret)
2312 self._afterlock(runhook)
2313 return ret
2314
2315 def listkeys(self, namespace):
2316 self.hook('prelistkeys', throw=True, namespace=namespace)
2317 self.ui.debug('listing keys for "%s"\n' % namespace)
2318 values = pushkey.list(self, namespace)
2319 self.hook('listkeys', namespace=namespace, values=values)
2320 return values
2321
2322 def debugwireargs(self, one, two, three=None, four=None, five=None):
2323 '''used to test argument passing over the wire'''
2324 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2325 pycompat.bytestr(four),
2326 pycompat.bytestr(five))
2327
2328 def savecommitmessage(self, text):
2329 fp = self.vfs('last-message.txt', 'wb')
2330 try:
2331 fp.write(text)
2332 finally:
2333 fp.close()
2334 return self.pathto(fp.name[len(self.root) + 1:])
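# Editorial note: the message is written to .hg/last-message.txt and a
# printable path to it is returned, e.g.:
#
#     msgfn = repo.savecommitmessage('WIP: draft message\n')
#     repo.ui.status('message saved in %s\n' % msgfn)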
2335
2336 # used to avoid circular references so destructors work
2337 def aftertrans(files):
2338 renamefiles = [tuple(t) for t in files]
2339 def a():
2340 for vfs, src, dest in renamefiles:
2341 # if src and dest refer to the same file, vfs.rename is a no-op,
2342 # leaving both src and dest on disk. delete dest to make sure
2343 # the rename cannot be such a no-op.
2344 vfs.tryunlink(dest)
2345 try:
2346 vfs.rename(src, dest)
2347 except OSError: # journal file does not yet exist
2348 pass
2349 return a
2350
2351 def undoname(fn):
2352 base, name = os.path.split(fn)
2353 assert name.startswith('journal')
2354 return os.path.join(base, name.replace('journal', 'undo', 1))
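# Editorial example: undoname() maps a journal file name to its undo
# counterpart, e.g. '.hg/store/journal' -> '.hg/store/undo' and
# 'journal.backupfiles' -> 'undo.backupfiles'.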
2355
2355
2356 def instance(ui, path, create, intents=None):
2356 def instance(ui, path, create, intents=None):
2357 return localrepository(ui, util.urllocalpath(path), create,
2357 return localrepository(ui, util.urllocalpath(path), create,
2358 intents=intents)
2358 intents=intents)
2359
2359
2360 def islocal(path):
2360 def islocal(path):
2361 return True
2361 return True
2362
2362
2363 def newreporequirements(repo):
2363 def newreporequirements(repo):
2364 """Determine the set of requirements for a new local repository.
2364 """Determine the set of requirements for a new local repository.
2365
2365
2366 Extensions can wrap this function to specify custom requirements for
2366 Extensions can wrap this function to specify custom requirements for
2367 new repositories.
2367 new repositories.
2368 """
2368 """
2369 ui = repo.ui
2369 ui = repo.ui
2370 requirements = {'revlogv1'}
2370 requirements = {'revlogv1'}
2371 if ui.configbool('format', 'usestore'):
2371 if ui.configbool('format', 'usestore'):
2372 requirements.add('store')
2372 requirements.add('store')
2373 if ui.configbool('format', 'usefncache'):
2373 if ui.configbool('format', 'usefncache'):
2374 requirements.add('fncache')
2374 requirements.add('fncache')
2375 if ui.configbool('format', 'dotencode'):
2375 if ui.configbool('format', 'dotencode'):
2376 requirements.add('dotencode')
2376 requirements.add('dotencode')
2377
2377
2378 compengine = ui.config('experimental', 'format.compression')
2378 compengine = ui.config('experimental', 'format.compression')
2379 if compengine not in util.compengines:
2379 if compengine not in util.compengines:
2380 raise error.Abort(_('compression engine %s defined by '
2380 raise error.Abort(_('compression engine %s defined by '
2381 'experimental.format.compression not available') %
2381 'experimental.format.compression not available') %
2382 compengine,
2382 compengine,
2383 hint=_('run "hg debuginstall" to list available '
2383 hint=_('run "hg debuginstall" to list available '
2384 'compression engines'))
2384 'compression engines'))
2385
2385
2386 # zlib is the historical default and doesn't need an explicit requirement.
2386 # zlib is the historical default and doesn't need an explicit requirement.
2387 if compengine != 'zlib':
2387 if compengine != 'zlib':
2388 requirements.add('exp-compression-%s' % compengine)
2388 requirements.add('exp-compression-%s' % compengine)
2389
2389
2390 if scmutil.gdinitconfig(ui):
2390 if scmutil.gdinitconfig(ui):
2391 requirements.add('generaldelta')
2391 requirements.add('generaldelta')
2392 if ui.configbool('experimental', 'treemanifest'):
2392 if ui.configbool('experimental', 'treemanifest'):
2393 requirements.add('treemanifest')
2393 requirements.add('treemanifest')
2394 # experimental config: format.sparse-revlog
2394 # experimental config: format.sparse-revlog
2395 if ui.configbool('format', 'sparse-revlog'):
2395 if ui.configbool('format', 'sparse-revlog'):
2396 requirements.add(SPARSEREVLOG_REQUIREMENT)
2396 requirements.add(SPARSEREVLOG_REQUIREMENT)
2397
2397
2398 revlogv2 = ui.config('experimental', 'revlogv2')
2398 revlogv2 = ui.config('experimental', 'revlogv2')
2399 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2399 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2400 requirements.remove('revlogv1')
2400 requirements.remove('revlogv1')
2401 # generaldelta is implied by revlogv2.
2401 # generaldelta is implied by revlogv2.
2402 requirements.discard('generaldelta')
2402 requirements.discard('generaldelta')
2403 requirements.add(REVLOGV2_REQUIREMENT)
2403 requirements.add(REVLOGV2_REQUIREMENT)
2404
2404
2405 return requirements
2405 return requirements
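Since newreporequirements is explicitly designed to be wrapped, a hypothetical extension could add its own requirement like this (a sketch following the standard extensions.wrapfunction convention, where the wrapped original arrives as the first argument; the 'exp-myfeature' requirement and the 'myext' config section are invented for illustration):

    from mercurial import extensions, localrepo

    def _wrapnewreporequirements(orig, repo):
        # start from the stock requirement set, then extend it
        requirements = orig(repo)
        if repo.ui.configbool('myext', 'enable-myfeature'):
            requirements.add('exp-myfeature')
        return requirements

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _wrapnewreporequirements)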
@@ -1,198 +1,198 b''
1 # narrowspec.py - methods for working with a narrow view of a repository
1 # narrowspec.py - methods for working with a narrow view of a repository
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 error,
14 error,
15 match as matchmod,
15 match as matchmod,
16 repository,
16 repository,
17 sparse,
17 sparse,
18 util,
18 util,
19 )
19 )
20
20
21 FILENAME = 'narrowspec'
21 FILENAME = 'narrowspec'
22
22
23 def parseserverpatterns(text):
23 def parseserverpatterns(text):
24 """Parses the narrowspec format that's returned by the server."""
24 """Parses the narrowspec format that's returned by the server."""
25 includepats = set()
25 includepats = set()
26 excludepats = set()
26 excludepats = set()
27
27
28 # We get one entry per line, in the format "<key> <value>".
28 # We get one entry per line, in the format "<key> <value>".
29 # It's OK for value to contain other spaces.
29 # It's OK for value to contain other spaces.
30 for kp in (l.split(' ', 1) for l in text.splitlines()):
30 for kp in (l.split(' ', 1) for l in text.splitlines()):
31 if len(kp) != 2:
31 if len(kp) != 2:
32 raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp)
32 raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp)
33 key = kp[0]
33 key = kp[0]
34 pat = kp[1]
34 pat = kp[1]
35 if key == 'include':
35 if key == 'include':
36 includepats.add(pat)
36 includepats.add(pat)
37 elif key == 'exclude':
37 elif key == 'exclude':
38 excludepats.add(pat)
38 excludepats.add(pat)
39 else:
39 else:
40 raise error.Abort(_('Invalid key "%s" in server response') % key)
40 raise error.Abort(_('Invalid key "%s" in server response') % key)
41
41
42 return includepats, excludepats
42 return includepats, excludepats
43
43
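For example, given the wire format described above, a two-line server response parses as follows (illustrative; the set reprs follow the Python 2 style used elsewhere in this file):

    >>> parseserverpatterns('include path:foo\nexclude path:foo/bar\n')
    (set(['path:foo']), set(['path:foo/bar']))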
44 def normalizesplitpattern(kind, pat):
44 def normalizesplitpattern(kind, pat):
45 """Returns the normalized version of a pattern and kind.
45 """Returns the normalized version of a pattern and kind.
46
46
47 Returns a tuple with the normalized kind and normalized pattern.
47 Returns a tuple with the normalized kind and normalized pattern.
48 """
48 """
49 pat = pat.rstrip('/')
49 pat = pat.rstrip('/')
50 _validatepattern(pat)
50 _validatepattern(pat)
51 return kind, pat
51 return kind, pat
52
52
53 def _numlines(s):
53 def _numlines(s):
54 """Returns the number of lines in s, including ending empty lines."""
54 """Returns the number of lines in s, including ending empty lines."""
55 # We use splitlines because it is Unicode-friendly and thus Python 3
55 # We use splitlines because it is Unicode-friendly and thus Python 3
56 # compatible. However, it does not count empty lines at the end, so trick
56 # compatible. However, it does not count empty lines at the end, so trick
57 # it by adding a character at the end.
57 # it by adding a character at the end.
58 return len((s + 'x').splitlines())
58 return len((s + 'x').splitlines())
59
59
60 def _validatepattern(pat):
60 def _validatepattern(pat):
61 """Validates the pattern and aborts if it is invalid.
61 """Validates the pattern and aborts if it is invalid.
62
62
63 Patterns are stored in the narrowspec as newline-separated
63 Patterns are stored in the narrowspec as newline-separated
64 POSIX-style bytestring paths. There's no escaping.
64 POSIX-style bytestring paths. There's no escaping.
65 """
65 """
66
66
67 # We use newlines as separators in the narrowspec file, so don't allow them
67 # We use newlines as separators in the narrowspec file, so don't allow them
68 # in patterns.
68 # in patterns.
69 if _numlines(pat) > 1:
69 if _numlines(pat) > 1:
70 raise error.Abort(_('newlines are not allowed in narrowspec paths'))
70 raise error.Abort(_('newlines are not allowed in narrowspec paths'))
71
71
72 components = pat.split('/')
72 components = pat.split('/')
73 if '.' in components or '..' in components:
73 if '.' in components or '..' in components:
74 raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
74 raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
75
75
76 def normalizepattern(pattern, defaultkind='path'):
76 def normalizepattern(pattern, defaultkind='path'):
77 """Returns the normalized version of a text-format pattern.
77 """Returns the normalized version of a text-format pattern.
78
78
79 If the pattern has no kind, the default will be added.
79 If the pattern has no kind, the default will be added.
80 """
80 """
81 kind, pat = matchmod._patsplit(pattern, defaultkind)
81 kind, pat = matchmod._patsplit(pattern, defaultkind)
82 return '%s:%s' % normalizesplitpattern(kind, pat)
82 return '%s:%s' % normalizesplitpattern(kind, pat)
83
83
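A couple of worked examples of the normalization above (illustrative; they follow from _patsplit's default-kind handling and the trailing-slash strip in normalizesplitpattern):

    >>> normalizepattern('foo/bar/')
    'path:foo/bar'
    >>> normalizepattern('glob:foo/*')
    'glob:foo/*'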
84 def parsepatterns(pats):
84 def parsepatterns(pats):
85 """Parses a list of patterns into a typed pattern set."""
85 """Parses a list of patterns into a typed pattern set."""
86 return set(normalizepattern(p) for p in pats)
86 return set(normalizepattern(p) for p in pats)
87
87
88 def format(includes, excludes):
88 def format(includes, excludes):
89 output = '[include]\n'
89 output = '[include]\n'
90 for i in sorted(includes - excludes):
90 for i in sorted(includes - excludes):
91 output += i + '\n'
91 output += i + '\n'
92 output += '[exclude]\n'
92 output += '[exclude]\n'
93 for e in sorted(excludes):
93 for e in sorted(excludes):
94 output += e + '\n'
94 output += e + '\n'
95 return output
95 return output
96
96
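The serialized form produced by format() looks like this (illustrative):

    >>> format(set(['path:foo']), set(['path:foo/bar']))
    '[include]\npath:foo\n[exclude]\npath:foo/bar\n'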
97 def match(root, include=None, exclude=None):
97 def match(root, include=None, exclude=None):
98 if not include:
98 if not include:
99 # Passing empty include and empty exclude to matchmod.match()
99 # Passing empty include and empty exclude to matchmod.match()
100 # gives a matcher that matches everything, so explicitly use
100 # gives a matcher that matches everything, so explicitly use
101 # the nevermatcher.
101 # the nevermatcher.
102 return matchmod.never(root, '')
102 return matchmod.never(root, '')
103 return matchmod.match(root, '', [], include=include or [],
103 return matchmod.match(root, '', [], include=include or [],
104 exclude=exclude or [])
104 exclude=exclude or [])
105
105
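A usage sketch for match() (illustrative; the returned matcher is callable on repository-relative paths):

    >>> m = match('/repo', include=['path:foo'])
    >>> m('foo/f'), m('baz/f')
    (True, False)
    >>> match('/repo')('anything')  # no includes -> nevermatcher
    False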
106 def needsexpansion(includes):
106 def needsexpansion(includes):
107 return [i for i in includes if i.startswith('include:')]
107 return [i for i in includes if i.startswith('include:')]
108
108
109 def load(repo):
109 def load(repo):
110 try:
110 try:
111 spec = repo.vfs.read(FILENAME)
111 spec = repo.svfs.read(FILENAME)
112 except IOError as e:
112 except IOError as e:
113 # Treat "narrowspec does not exist" the same as "narrowspec file exists
113 # Treat "narrowspec does not exist" the same as "narrowspec file exists
114 # and is empty".
114 # and is empty".
115 if e.errno == errno.ENOENT:
115 if e.errno == errno.ENOENT:
116 return set(), set()
116 return set(), set()
117 raise
117 raise
118 # maybe we should care about the profiles returned too
118 # maybe we should care about the profiles returned too
119 includepats, excludepats, profiles = sparse.parseconfig(repo.ui, spec,
119 includepats, excludepats, profiles = sparse.parseconfig(repo.ui, spec,
120 'narrow')
120 'narrow')
121 if profiles:
121 if profiles:
122 raise error.Abort(_("including other spec files using '%include' is not"
122 raise error.Abort(_("including other spec files using '%include' is not"
123 " suported in narrowspec"))
123 " suported in narrowspec"))
124 return includepats, excludepats
124 return includepats, excludepats
125
125
126 def save(repo, includepats, excludepats):
126 def save(repo, includepats, excludepats):
127 spec = format(includepats, excludepats)
127 spec = format(includepats, excludepats)
128 repo.vfs.write(FILENAME, spec)
128 repo.svfs.write(FILENAME, spec)
129
129
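A minimal round-trip sketch of the two functions above, assuming an existing local repository at the current directory. After this change the spec is read and written through repo.svfs, i.e. it lives at .hg/store/narrowspec rather than .hg/narrowspec:

    from mercurial import hg, narrowspec, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    narrowspec.save(repo, set(['path:foo']), set())
    includes, excludes = narrowspec.load(repo)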
130 def savebackup(repo, backupname):
130 def savebackup(repo, backupname):
131 if repository.NARROW_REQUIREMENT not in repo.requirements:
131 if repository.NARROW_REQUIREMENT not in repo.requirements:
132 return
132 return
133 vfs = repo.vfs
133 vfs = repo.vfs
134 vfs.tryunlink(backupname)
134 vfs.tryunlink(backupname)
135 util.copyfile(vfs.join(FILENAME), vfs.join(backupname), hardlink=True)
135 util.copyfile(repo.svfs.join(FILENAME), vfs.join(backupname), hardlink=True)
136
136
137 def restorebackup(repo, backupname):
137 def restorebackup(repo, backupname):
138 if repository.NARROW_REQUIREMENT not in repo.requirements:
138 if repository.NARROW_REQUIREMENT not in repo.requirements:
139 return
139 return
140 repo.vfs.rename(backupname, FILENAME)
140 util.rename(repo.vfs.join(backupname), repo.svfs.join(FILENAME))
141
141
142 def clearbackup(repo, backupname):
142 def clearbackup(repo, backupname):
143 if repository.NARROW_REQUIREMENT not in repo.requirements:
143 if repository.NARROW_REQUIREMENT not in repo.requirements:
144 return
144 return
145 repo.vfs.unlink(backupname)
145 repo.vfs.unlink(backupname)
146
146
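A sketch of the backup dance these three helpers support around a transaction; note the asymmetry after this change: the live spec sits under repo.svfs ('.hg/store/') while the backup itself stays under repo.vfs ('.hg/'). The backup name is the caller's choice; 'journal.narrowspec' here is illustrative:

    savebackup(repo, 'journal.narrowspec')
    try:
        pass  # ... mutate the narrowspec here ...
    except Exception:
        restorebackup(repo, 'journal.narrowspec')
        raise
    else:
        clearbackup(repo, 'journal.narrowspec')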
147 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
147 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
148 r""" Restricts the patterns according to repo settings,
148 r""" Restricts the patterns according to repo settings,
149 results in a logical AND operation
149 results in a logical AND operation
150
150
151 :param req_includes: requested includes
151 :param req_includes: requested includes
152 :param req_excludes: requested excludes
152 :param req_excludes: requested excludes
153 :param repo_includes: repo includes
153 :param repo_includes: repo includes
154 :param repo_excludes: repo excludes
154 :param repo_excludes: repo excludes
155 :return: include patterns, exclude patterns, and invalid include patterns.
155 :return: include patterns, exclude patterns, and invalid include patterns.
156
156
157 >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
157 >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
158 (set(['f1']), {}, [])
158 (set(['f1']), {}, [])
159 >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
159 >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
160 (set(['f1']), {}, [])
160 (set(['f1']), {}, [])
161 >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
161 >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
162 (set(['f1/fc1']), {}, [])
162 (set(['f1/fc1']), {}, [])
163 >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
163 >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
164 ([], set(['path:.']), [])
164 ([], set(['path:.']), [])
165 >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
165 >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
166 (set(['f2/fc2']), {}, [])
166 (set(['f2/fc2']), {}, [])
167 >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
167 >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
168 ([], set(['path:.']), [])
168 ([], set(['path:.']), [])
169 >>> restrictpatterns({'f1/$non_existent_var'}, {}, ['f1','f2'], [])
169 >>> restrictpatterns({'f1/$non_existent_var'}, {}, ['f1','f2'], [])
170 (set(['f1/$non_existent_var']), {}, [])
170 (set(['f1/$non_existent_var']), {}, [])
171 """
171 """
172 res_excludes = set(req_excludes)
172 res_excludes = set(req_excludes)
173 res_excludes.update(repo_excludes)
173 res_excludes.update(repo_excludes)
174 invalid_includes = []
174 invalid_includes = []
175 if not req_includes:
175 if not req_includes:
176 res_includes = set(repo_includes)
176 res_includes = set(repo_includes)
177 elif 'path:.' not in repo_includes:
177 elif 'path:.' not in repo_includes:
178 res_includes = []
178 res_includes = []
179 for req_include in req_includes:
179 for req_include in req_includes:
180 req_include = util.expandpath(util.normpath(req_include))
180 req_include = util.expandpath(util.normpath(req_include))
181 if req_include in repo_includes:
181 if req_include in repo_includes:
182 res_includes.append(req_include)
182 res_includes.append(req_include)
183 continue
183 continue
184 valid = False
184 valid = False
185 for repo_include in repo_includes:
185 for repo_include in repo_includes:
186 if req_include.startswith(repo_include + '/'):
186 if req_include.startswith(repo_include + '/'):
187 valid = True
187 valid = True
188 res_includes.append(req_include)
188 res_includes.append(req_include)
189 break
189 break
190 if not valid:
190 if not valid:
191 invalid_includes.append(req_include)
191 invalid_includes.append(req_include)
192 if len(res_includes) == 0:
192 if len(res_includes) == 0:
193 res_excludes = {'path:.'}
193 res_excludes = {'path:.'}
194 else:
194 else:
195 res_includes = set(res_includes)
195 res_includes = set(res_includes)
196 else:
196 else:
197 res_includes = set(req_includes)
197 res_includes = set(req_includes)
198 return res_includes, res_excludes, invalid_includes
198 return res_includes, res_excludes, invalid_includes
@@ -1,594 +1,594 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from . import (
16 from . import (
17 error,
17 error,
18 node,
18 node,
19 policy,
19 policy,
20 pycompat,
20 pycompat,
21 util,
21 util,
22 vfs as vfsmod,
22 vfs as vfsmod,
23 )
23 )
24
24
25 parsers = policy.importmod(r'parsers')
25 parsers = policy.importmod(r'parsers')
26
26
27 # This avoids a collision between a file named foo and a dir named
27 # This avoids a collision between a file named foo and a dir named
28 # foo.i or foo.d
28 # foo.i or foo.d
29 def _encodedir(path):
29 def _encodedir(path):
30 '''
30 '''
31 >>> _encodedir(b'data/foo.i')
31 >>> _encodedir(b'data/foo.i')
32 'data/foo.i'
32 'data/foo.i'
33 >>> _encodedir(b'data/foo.i/bla.i')
33 >>> _encodedir(b'data/foo.i/bla.i')
34 'data/foo.i.hg/bla.i'
34 'data/foo.i.hg/bla.i'
35 >>> _encodedir(b'data/foo.i.hg/bla.i')
35 >>> _encodedir(b'data/foo.i.hg/bla.i')
36 'data/foo.i.hg.hg/bla.i'
36 'data/foo.i.hg.hg/bla.i'
37 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
37 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
38 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
38 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
39 '''
39 '''
40 return (path
40 return (path
41 .replace(".hg/", ".hg.hg/")
41 .replace(".hg/", ".hg.hg/")
42 .replace(".i/", ".i.hg/")
42 .replace(".i/", ".i.hg/")
43 .replace(".d/", ".d.hg/"))
43 .replace(".d/", ".d.hg/"))
44
44
45 encodedir = getattr(parsers, 'encodedir', _encodedir)
45 encodedir = getattr(parsers, 'encodedir', _encodedir)
46
46
47 def decodedir(path):
47 def decodedir(path):
48 '''
48 '''
49 >>> decodedir(b'data/foo.i')
49 >>> decodedir(b'data/foo.i')
50 'data/foo.i'
50 'data/foo.i'
51 >>> decodedir(b'data/foo.i.hg/bla.i')
51 >>> decodedir(b'data/foo.i.hg/bla.i')
52 'data/foo.i/bla.i'
52 'data/foo.i/bla.i'
53 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
53 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
54 'data/foo.i.hg/bla.i'
54 'data/foo.i.hg/bla.i'
55 '''
55 '''
56 if ".hg/" not in path:
56 if ".hg/" not in path:
57 return path
57 return path
58 return (path
58 return (path
59 .replace(".d.hg/", ".d/")
59 .replace(".d.hg/", ".d/")
60 .replace(".i.hg/", ".i/")
60 .replace(".i.hg/", ".i/")
61 .replace(".hg.hg/", ".hg/"))
61 .replace(".hg.hg/", ".hg/"))
62
62
63 def _reserved():
63 def _reserved():
64 ''' characters that are problematic for filesystems
64 ''' characters that are problematic for filesystems
65
65
66 * ascii escapes (0..31)
66 * ascii escapes (0..31)
67 * ascii hi (126..255)
67 * ascii hi (126..255)
68 * windows specials
68 * windows specials
69
69
70 these characters will be escaped by encodefunctions
70 these characters will be escaped by encodefunctions
71 '''
71 '''
72 winreserved = [ord(x) for x in u'\\:*?"<>|']
72 winreserved = [ord(x) for x in u'\\:*?"<>|']
73 for x in range(32):
73 for x in range(32):
74 yield x
74 yield x
75 for x in range(126, 256):
75 for x in range(126, 256):
76 yield x
76 yield x
77 for x in winreserved:
77 for x in winreserved:
78 yield x
78 yield x
79
79
80 def _buildencodefun():
80 def _buildencodefun():
81 '''
81 '''
82 >>> enc, dec = _buildencodefun()
82 >>> enc, dec = _buildencodefun()
83
83
84 >>> enc(b'nothing/special.txt')
84 >>> enc(b'nothing/special.txt')
85 'nothing/special.txt'
85 'nothing/special.txt'
86 >>> dec(b'nothing/special.txt')
86 >>> dec(b'nothing/special.txt')
87 'nothing/special.txt'
87 'nothing/special.txt'
88
88
89 >>> enc(b'HELLO')
89 >>> enc(b'HELLO')
90 '_h_e_l_l_o'
90 '_h_e_l_l_o'
91 >>> dec(b'_h_e_l_l_o')
91 >>> dec(b'_h_e_l_l_o')
92 'HELLO'
92 'HELLO'
93
93
94 >>> enc(b'hello:world?')
94 >>> enc(b'hello:world?')
95 'hello~3aworld~3f'
95 'hello~3aworld~3f'
96 >>> dec(b'hello~3aworld~3f')
96 >>> dec(b'hello~3aworld~3f')
97 'hello:world?'
97 'hello:world?'
98
98
99 >>> enc(b'the\\x07quick\\xADshot')
99 >>> enc(b'the\\x07quick\\xADshot')
100 'the~07quick~adshot'
100 'the~07quick~adshot'
101 >>> dec(b'the~07quick~adshot')
101 >>> dec(b'the~07quick~adshot')
102 'the\\x07quick\\xadshot'
102 'the\\x07quick\\xadshot'
103 '''
103 '''
104 e = '_'
104 e = '_'
105 xchr = pycompat.bytechr
105 xchr = pycompat.bytechr
106 asciistr = list(map(xchr, range(127)))
106 asciistr = list(map(xchr, range(127)))
107 capitals = list(range(ord("A"), ord("Z") + 1))
107 capitals = list(range(ord("A"), ord("Z") + 1))
108
108
109 cmap = dict((x, x) for x in asciistr)
109 cmap = dict((x, x) for x in asciistr)
110 for x in _reserved():
110 for x in _reserved():
111 cmap[xchr(x)] = "~%02x" % x
111 cmap[xchr(x)] = "~%02x" % x
112 for x in capitals + [ord(e)]:
112 for x in capitals + [ord(e)]:
113 cmap[xchr(x)] = e + xchr(x).lower()
113 cmap[xchr(x)] = e + xchr(x).lower()
114
114
115 dmap = {}
115 dmap = {}
116 for k, v in cmap.iteritems():
116 for k, v in cmap.iteritems():
117 dmap[v] = k
117 dmap[v] = k
118 def decode(s):
118 def decode(s):
119 i = 0
119 i = 0
120 while i < len(s):
120 while i < len(s):
121 for l in pycompat.xrange(1, 4):
121 for l in pycompat.xrange(1, 4):
122 try:
122 try:
123 yield dmap[s[i:i + l]]
123 yield dmap[s[i:i + l]]
124 i += l
124 i += l
125 break
125 break
126 except KeyError:
126 except KeyError:
127 pass
127 pass
128 else:
128 else:
129 raise KeyError
129 raise KeyError
130 return (lambda s: ''.join([cmap[s[c:c + 1]]
130 return (lambda s: ''.join([cmap[s[c:c + 1]]
131 for c in pycompat.xrange(len(s))]),
131 for c in pycompat.xrange(len(s))]),
132 lambda s: ''.join(list(decode(s))))
132 lambda s: ''.join(list(decode(s))))
133
133
134 _encodefname, _decodefname = _buildencodefun()
134 _encodefname, _decodefname = _buildencodefun()
135
135
136 def encodefilename(s):
136 def encodefilename(s):
137 '''
137 '''
138 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
138 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
139 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
139 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
140 '''
140 '''
141 return _encodefname(encodedir(s))
141 return _encodefname(encodedir(s))
142
142
143 def decodefilename(s):
143 def decodefilename(s):
144 '''
144 '''
145 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
145 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
146 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
146 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
147 '''
147 '''
148 return decodedir(_decodefname(s))
148 return decodedir(_decodefname(s))
149
149
150 def _buildlowerencodefun():
150 def _buildlowerencodefun():
151 '''
151 '''
152 >>> f = _buildlowerencodefun()
152 >>> f = _buildlowerencodefun()
153 >>> f(b'nothing/special.txt')
153 >>> f(b'nothing/special.txt')
154 'nothing/special.txt'
154 'nothing/special.txt'
155 >>> f(b'HELLO')
155 >>> f(b'HELLO')
156 'hello'
156 'hello'
157 >>> f(b'hello:world?')
157 >>> f(b'hello:world?')
158 'hello~3aworld~3f'
158 'hello~3aworld~3f'
159 >>> f(b'the\\x07quick\\xADshot')
159 >>> f(b'the\\x07quick\\xADshot')
160 'the~07quick~adshot'
160 'the~07quick~adshot'
161 '''
161 '''
162 xchr = pycompat.bytechr
162 xchr = pycompat.bytechr
163 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
163 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
164 for x in _reserved():
164 for x in _reserved():
165 cmap[xchr(x)] = "~%02x" % x
165 cmap[xchr(x)] = "~%02x" % x
166 for x in range(ord("A"), ord("Z") + 1):
166 for x in range(ord("A"), ord("Z") + 1):
167 cmap[xchr(x)] = xchr(x).lower()
167 cmap[xchr(x)] = xchr(x).lower()
168 def lowerencode(s):
168 def lowerencode(s):
169 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
169 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
170 return lowerencode
170 return lowerencode
171
171
172 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
172 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
173
173
174 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
174 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
175 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
175 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
176 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
176 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
177 def _auxencode(path, dotencode):
177 def _auxencode(path, dotencode):
178 '''
178 '''
179 Encodes filenames containing names reserved by Windows or which end in
179 Encodes filenames containing names reserved by Windows or which end in
180 period or space. Does not touch other single reserved characters c.
180 period or space. Does not touch other single reserved characters c.
181 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
181 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
182 Additionally encodes space or period at the beginning, if dotencode is
182 Additionally encodes space or period at the beginning, if dotencode is
183 True. Parameter path is assumed to be all lowercase.
183 True. Parameter path is assumed to be all lowercase.
184 A segment only needs encoding if a reserved name appears as a
184 A segment only needs encoding if a reserved name appears as a
185 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
185 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
186 doesn't need encoding.
186 doesn't need encoding.
187
187
188 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
188 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
189 >>> _auxencode(s.split(b'/'), True)
189 >>> _auxencode(s.split(b'/'), True)
190 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
190 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
191 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
191 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
192 >>> _auxencode(s.split(b'/'), False)
192 >>> _auxencode(s.split(b'/'), False)
193 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
193 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
194 >>> _auxencode([b'foo. '], True)
194 >>> _auxencode([b'foo. '], True)
195 ['foo.~20']
195 ['foo.~20']
196 >>> _auxencode([b' .foo'], True)
196 >>> _auxencode([b' .foo'], True)
197 ['~20.foo']
197 ['~20.foo']
198 '''
198 '''
199 for i, n in enumerate(path):
199 for i, n in enumerate(path):
200 if not n:
200 if not n:
201 continue
201 continue
202 if dotencode and n[0] in '. ':
202 if dotencode and n[0] in '. ':
203 n = "~%02x" % ord(n[0:1]) + n[1:]
203 n = "~%02x" % ord(n[0:1]) + n[1:]
204 path[i] = n
204 path[i] = n
205 else:
205 else:
206 l = n.find('.')
206 l = n.find('.')
207 if l == -1:
207 if l == -1:
208 l = len(n)
208 l = len(n)
209 if ((l == 3 and n[:3] in _winres3) or
209 if ((l == 3 and n[:3] in _winres3) or
210 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
210 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
211 and n[:3] in _winres4)):
211 and n[:3] in _winres4)):
212 # encode third letter ('aux' -> 'au~78')
212 # encode third letter ('aux' -> 'au~78')
213 ec = "~%02x" % ord(n[2:3])
213 ec = "~%02x" % ord(n[2:3])
214 n = n[0:2] + ec + n[3:]
214 n = n[0:2] + ec + n[3:]
215 path[i] = n
215 path[i] = n
216 if n[-1] in '. ':
216 if n[-1] in '. ':
217 # encode last period or space ('foo...' -> 'foo..~2e')
217 # encode last period or space ('foo...' -> 'foo..~2e')
218 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
218 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
219 return path
219 return path
220
220
221 _maxstorepathlen = 120
221 _maxstorepathlen = 120
222 _dirprefixlen = 8
222 _dirprefixlen = 8
223 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
223 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
224
224
225 def _hashencode(path, dotencode):
225 def _hashencode(path, dotencode):
226 digest = node.hex(hashlib.sha1(path).digest())
226 digest = node.hex(hashlib.sha1(path).digest())
227 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
227 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
228 parts = _auxencode(le, dotencode)
228 parts = _auxencode(le, dotencode)
229 basename = parts[-1]
229 basename = parts[-1]
230 _root, ext = os.path.splitext(basename)
230 _root, ext = os.path.splitext(basename)
231 sdirs = []
231 sdirs = []
232 sdirslen = 0
232 sdirslen = 0
233 for p in parts[:-1]:
233 for p in parts[:-1]:
234 d = p[:_dirprefixlen]
234 d = p[:_dirprefixlen]
235 if d[-1] in '. ':
235 if d[-1] in '. ':
236 # Windows can't access dirs ending in period or space
236 # Windows can't access dirs ending in period or space
237 d = d[:-1] + '_'
237 d = d[:-1] + '_'
238 if sdirslen == 0:
238 if sdirslen == 0:
239 t = len(d)
239 t = len(d)
240 else:
240 else:
241 t = sdirslen + 1 + len(d)
241 t = sdirslen + 1 + len(d)
242 if t > _maxshortdirslen:
242 if t > _maxshortdirslen:
243 break
243 break
244 sdirs.append(d)
244 sdirs.append(d)
245 sdirslen = t
245 sdirslen = t
246 dirs = '/'.join(sdirs)
246 dirs = '/'.join(sdirs)
247 if len(dirs) > 0:
247 if len(dirs) > 0:
248 dirs += '/'
248 dirs += '/'
249 res = 'dh/' + dirs + digest + ext
249 res = 'dh/' + dirs + digest + ext
250 spaceleft = _maxstorepathlen - len(res)
250 spaceleft = _maxstorepathlen - len(res)
251 if spaceleft > 0:
251 if spaceleft > 0:
252 filler = basename[:spaceleft]
252 filler = basename[:spaceleft]
253 res = 'dh/' + dirs + filler + digest + ext
253 res = 'dh/' + dirs + filler + digest + ext
254 return res
254 return res
255
255
256 def _hybridencode(path, dotencode):
256 def _hybridencode(path, dotencode):
257 '''encodes path with a length limit
257 '''encodes path with a length limit
258
258
259 Encodes all paths that begin with 'data/', according to the following.
259 Encodes all paths that begin with 'data/', according to the following.
260
260
261 Default encoding (reversible):
261 Default encoding (reversible):
262
262
263 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
263 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
264 characters are encoded as '~xx', where xx is the two digit hex code
264 characters are encoded as '~xx', where xx is the two digit hex code
265 of the character (see encodefilename).
265 of the character (see encodefilename).
266 Relevant path components consisting of Windows reserved filenames are
266 Relevant path components consisting of Windows reserved filenames are
267 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
267 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
268
268
269 Hashed encoding (not reversible):
269 Hashed encoding (not reversible):
270
270
271 If the default-encoded path is longer than _maxstorepathlen, a
271 If the default-encoded path is longer than _maxstorepathlen, a
272 non-reversible hybrid hashing of the path is done instead.
272 non-reversible hybrid hashing of the path is done instead.
273 This encoding uses up to _dirprefixlen characters of all directory
273 This encoding uses up to _dirprefixlen characters of all directory
274 levels of the lowerencoded path, but not more levels than can fit into
274 levels of the lowerencoded path, but not more levels than can fit into
275 _maxshortdirslen.
275 _maxshortdirslen.
276 Then comes the filler, followed by the sha digest of the full path.
276 Then comes the filler, followed by the sha digest of the full path.
277 The filler is the beginning of the basename of the lowerencoded path
277 The filler is the beginning of the basename of the lowerencoded path
278 (the basename is everything after the last path separator). The filler
278 (the basename is everything after the last path separator). The filler
279 is as long as possible, filling in characters from the basename until
279 is as long as possible, filling in characters from the basename until
280 the encoded path has _maxstorepathlen characters (or all chars of the
280 the encoded path has _maxstorepathlen characters (or all chars of the
281 basename have been taken).
281 basename have been taken).
282 The extension (e.g. '.i' or '.d') is preserved.
282 The extension (e.g. '.i' or '.d') is preserved.
283
283
284 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
284 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
285 encoding was used.
285 encoding was used.
286 '''
286 '''
287 path = encodedir(path)
287 path = encodedir(path)
288 ef = _encodefname(path).split('/')
288 ef = _encodefname(path).split('/')
289 res = '/'.join(_auxencode(ef, dotencode))
289 res = '/'.join(_auxencode(ef, dotencode))
290 if len(res) > _maxstorepathlen:
290 if len(res) > _maxstorepathlen:
291 res = _hashencode(path, dotencode)
291 res = _hashencode(path, dotencode)
292 return res
292 return res
293
293
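Two illustrative cases of the scheme described above: a short path stays in the reversible default encoding, while a long one falls into the hashed 'dh/' form (the digest is elided here since it depends on the input):

    >>> _hybridencode('data/FOO/bar.txt.i', False)
    'data/_f_o_o/bar.txt.i'
    >>> long = 'data/' + 'x' * 150 + '.i'
    >>> _hybridencode(long, False)[:3]
    'dh/'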
294 def _pathencode(path):
294 def _pathencode(path):
295 de = encodedir(path)
295 de = encodedir(path)
296 if len(path) > _maxstorepathlen:
296 if len(path) > _maxstorepathlen:
297 return _hashencode(de, True)
297 return _hashencode(de, True)
298 ef = _encodefname(de).split('/')
298 ef = _encodefname(de).split('/')
299 res = '/'.join(_auxencode(ef, True))
299 res = '/'.join(_auxencode(ef, True))
300 if len(res) > _maxstorepathlen:
300 if len(res) > _maxstorepathlen:
301 return _hashencode(de, True)
301 return _hashencode(de, True)
302 return res
302 return res
303
303
304 _pathencode = getattr(parsers, 'pathencode', _pathencode)
304 _pathencode = getattr(parsers, 'pathencode', _pathencode)
305
305
306 def _plainhybridencode(f):
306 def _plainhybridencode(f):
307 return _hybridencode(f, False)
307 return _hybridencode(f, False)
308
308
309 def _calcmode(vfs):
309 def _calcmode(vfs):
310 try:
310 try:
311 # files in .hg/ will be created using this mode
311 # files in .hg/ will be created using this mode
312 mode = vfs.stat().st_mode
312 mode = vfs.stat().st_mode
313 # avoid some useless chmods
313 # avoid some useless chmods
314 if (0o777 & ~util.umask) == (0o777 & mode):
314 if (0o777 & ~util.umask) == (0o777 & mode):
315 mode = None
315 mode = None
316 except OSError:
316 except OSError:
317 mode = None
317 mode = None
318 return mode
318 return mode
319
319
320 _data = ('data meta 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
320 _data = ('narrowspec data meta 00manifest.d 00manifest.i'
321 ' phaseroots obsstore')
321 ' 00changelog.d 00changelog.i phaseroots obsstore')
322
322
323 def isrevlog(f, kind, st):
323 def isrevlog(f, kind, st):
324 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
324 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
325
325
326 class basicstore(object):
326 class basicstore(object):
327 '''base class for local repository stores'''
327 '''base class for local repository stores'''
328 def __init__(self, path, vfstype):
328 def __init__(self, path, vfstype):
329 vfs = vfstype(path)
329 vfs = vfstype(path)
330 self.path = vfs.base
330 self.path = vfs.base
331 self.createmode = _calcmode(vfs)
331 self.createmode = _calcmode(vfs)
332 vfs.createmode = self.createmode
332 vfs.createmode = self.createmode
333 self.rawvfs = vfs
333 self.rawvfs = vfs
334 self.vfs = vfsmod.filtervfs(vfs, encodedir)
334 self.vfs = vfsmod.filtervfs(vfs, encodedir)
335 self.opener = self.vfs
335 self.opener = self.vfs
336
336
337 def join(self, f):
337 def join(self, f):
338 return self.path + '/' + encodedir(f)
338 return self.path + '/' + encodedir(f)
339
339
340 def _walk(self, relpath, recurse, filefilter=isrevlog):
340 def _walk(self, relpath, recurse, filefilter=isrevlog):
341 '''yields (unencoded, encoded, size)'''
341 '''yields (unencoded, encoded, size)'''
342 path = self.path
342 path = self.path
343 if relpath:
343 if relpath:
344 path += '/' + relpath
344 path += '/' + relpath
345 striplen = len(self.path) + 1
345 striplen = len(self.path) + 1
346 l = []
346 l = []
347 if self.rawvfs.isdir(path):
347 if self.rawvfs.isdir(path):
348 visit = [path]
348 visit = [path]
349 readdir = self.rawvfs.readdir
349 readdir = self.rawvfs.readdir
350 while visit:
350 while visit:
351 p = visit.pop()
351 p = visit.pop()
352 for f, kind, st in readdir(p, stat=True):
352 for f, kind, st in readdir(p, stat=True):
353 fp = p + '/' + f
353 fp = p + '/' + f
354 if filefilter(f, kind, st):
354 if filefilter(f, kind, st):
355 n = util.pconvert(fp[striplen:])
355 n = util.pconvert(fp[striplen:])
356 l.append((decodedir(n), n, st.st_size))
356 l.append((decodedir(n), n, st.st_size))
357 elif kind == stat.S_IFDIR and recurse:
357 elif kind == stat.S_IFDIR and recurse:
358 visit.append(fp)
358 visit.append(fp)
359 l.sort()
359 l.sort()
360 return l
360 return l
361
361
362 def datafiles(self):
362 def datafiles(self):
363 return self._walk('data', True) + self._walk('meta', True)
363 return self._walk('data', True) + self._walk('meta', True)
364
364
365 def topfiles(self):
365 def topfiles(self):
366 # yield manifest before changelog
366 # yield manifest before changelog
367 return reversed(self._walk('', False))
367 return reversed(self._walk('', False))
368
368
369 def walk(self):
369 def walk(self):
370 '''yields (unencoded, encoded, size)'''
370 '''yields (unencoded, encoded, size)'''
371 # yield data files first
371 # yield data files first
372 for x in self.datafiles():
372 for x in self.datafiles():
373 yield x
373 yield x
374 for x in self.topfiles():
374 for x in self.topfiles():
375 yield x
375 yield x
376
376
377 def copylist(self):
377 def copylist(self):
378 return ['requires'] + _data.split()
378 return ['requires'] + _data.split()
379
379
380 def write(self, tr):
380 def write(self, tr):
381 pass
381 pass
382
382
383 def invalidatecaches(self):
383 def invalidatecaches(self):
384 pass
384 pass
385
385
386 def markremoved(self, fn):
386 def markremoved(self, fn):
387 pass
387 pass
388
388
389 def __contains__(self, path):
389 def __contains__(self, path):
390 '''Checks if the store contains path'''
390 '''Checks if the store contains path'''
391 path = "/".join(("data", path))
391 path = "/".join(("data", path))
392 # file?
392 # file?
393 if self.vfs.exists(path + ".i"):
393 if self.vfs.exists(path + ".i"):
394 return True
394 return True
395 # dir?
395 # dir?
396 if not path.endswith("/"):
396 if not path.endswith("/"):
397 path = path + "/"
397 path = path + "/"
398 return self.vfs.exists(path)
398 return self.vfs.exists(path)
399
399
400 class encodedstore(basicstore):
400 class encodedstore(basicstore):
401 def __init__(self, path, vfstype):
401 def __init__(self, path, vfstype):
402 vfs = vfstype(path + '/store')
402 vfs = vfstype(path + '/store')
403 self.path = vfs.base
403 self.path = vfs.base
404 self.createmode = _calcmode(vfs)
404 self.createmode = _calcmode(vfs)
405 vfs.createmode = self.createmode
405 vfs.createmode = self.createmode
406 self.rawvfs = vfs
406 self.rawvfs = vfs
407 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
407 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
408 self.opener = self.vfs
408 self.opener = self.vfs
409
409
410 def datafiles(self):
410 def datafiles(self):
411 for a, b, size in super(encodedstore, self).datafiles():
411 for a, b, size in super(encodedstore, self).datafiles():
412 try:
412 try:
413 a = decodefilename(a)
413 a = decodefilename(a)
414 except KeyError:
414 except KeyError:
415 a = None
415 a = None
416 yield a, b, size
416 yield a, b, size
417
417
418 def join(self, f):
418 def join(self, f):
419 return self.path + '/' + encodefilename(f)
419 return self.path + '/' + encodefilename(f)
420
420
421 def copylist(self):
421 def copylist(self):
422 return (['requires', '00changelog.i'] +
422 return (['requires', '00changelog.i'] +
423 ['store/' + f for f in _data.split()])
423 ['store/' + f for f in _data.split()])
424
424
425 class fncache(object):
425 class fncache(object):
426 # the filename used to be partially encoded
426 # the filename used to be partially encoded
427 # hence the encodedir/decodedir dance
427 # hence the encodedir/decodedir dance
428 def __init__(self, vfs):
428 def __init__(self, vfs):
429 self.vfs = vfs
429 self.vfs = vfs
430 self.entries = None
430 self.entries = None
431 self._dirty = False
431 self._dirty = False
432
432
433 def _load(self):
433 def _load(self):
434 '''fill the entries from the fncache file'''
434 '''fill the entries from the fncache file'''
435 self._dirty = False
435 self._dirty = False
436 try:
436 try:
437 fp = self.vfs('fncache', mode='rb')
437 fp = self.vfs('fncache', mode='rb')
438 except IOError:
438 except IOError:
439 # skip nonexistent file
439 # skip nonexistent file
440 self.entries = set()
440 self.entries = set()
441 return
441 return
442 self.entries = set(decodedir(fp.read()).splitlines())
442 self.entries = set(decodedir(fp.read()).splitlines())
443 if '' in self.entries:
443 if '' in self.entries:
444 fp.seek(0)
444 fp.seek(0)
445 for n, line in enumerate(util.iterfile(fp)):
445 for n, line in enumerate(util.iterfile(fp)):
446 if not line.rstrip('\n'):
446 if not line.rstrip('\n'):
447 t = _('invalid entry in fncache, line %d') % (n + 1)
447 t = _('invalid entry in fncache, line %d') % (n + 1)
448 raise error.Abort(t)
448 raise error.Abort(t)
449 fp.close()
449 fp.close()
450
450
451 def write(self, tr):
451 def write(self, tr):
452 if self._dirty:
452 if self._dirty:
453 assert self.entries is not None
453 assert self.entries is not None
454 tr.addbackup('fncache')
454 tr.addbackup('fncache')
455 fp = self.vfs('fncache', mode='wb', atomictemp=True)
455 fp = self.vfs('fncache', mode='wb', atomictemp=True)
456 if self.entries:
456 if self.entries:
457 fp.write(encodedir('\n'.join(self.entries) + '\n'))
457 fp.write(encodedir('\n'.join(self.entries) + '\n'))
458 fp.close()
458 fp.close()
459 self._dirty = False
459 self._dirty = False
460
460
461 def add(self, fn):
461 def add(self, fn):
462 if self.entries is None:
462 if self.entries is None:
463 self._load()
463 self._load()
464 if fn not in self.entries:
464 if fn not in self.entries:
465 self._dirty = True
465 self._dirty = True
466 self.entries.add(fn)
466 self.entries.add(fn)
467
467
468 def remove(self, fn):
468 def remove(self, fn):
469 if self.entries is None:
469 if self.entries is None:
470 self._load()
470 self._load()
471 try:
471 try:
472 self.entries.remove(fn)
472 self.entries.remove(fn)
473 self._dirty = True
473 self._dirty = True
474 except KeyError:
474 except KeyError:
475 pass
475 pass
476
476
477 def __contains__(self, fn):
477 def __contains__(self, fn):
478 if self.entries is None:
478 if self.entries is None:
479 self._load()
479 self._load()
480 return fn in self.entries
480 return fn in self.entries
481
481
482 def __iter__(self):
482 def __iter__(self):
483 if self.entries is None:
483 if self.entries is None:
484 self._load()
484 self._load()
485 return iter(self.entries)
485 return iter(self.entries)
486
486
487 class _fncachevfs(vfsmod.abstractvfs, vfsmod.proxyvfs):
487 class _fncachevfs(vfsmod.abstractvfs, vfsmod.proxyvfs):
488 def __init__(self, vfs, fnc, encode):
488 def __init__(self, vfs, fnc, encode):
489 vfsmod.proxyvfs.__init__(self, vfs)
489 vfsmod.proxyvfs.__init__(self, vfs)
490 self.fncache = fnc
490 self.fncache = fnc
491 self.encode = encode
491 self.encode = encode
492
492
493 def __call__(self, path, mode='r', *args, **kw):
493 def __call__(self, path, mode='r', *args, **kw):
494 encoded = self.encode(path)
494 encoded = self.encode(path)
495 if mode not in ('r', 'rb') and (path.startswith('data/') or
495 if mode not in ('r', 'rb') and (path.startswith('data/') or
496 path.startswith('meta/')):
496 path.startswith('meta/')):
497 # do not trigger a fncache load when adding a file that already is
497 # do not trigger a fncache load when adding a file that already is
498 # known to exist.
498 # known to exist.
499 notload = self.fncache.entries is None and self.vfs.exists(encoded)
499 notload = self.fncache.entries is None and self.vfs.exists(encoded)
500 if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
500 if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
501 # when appending to an existing file, if the file has size zero,
501 # when appending to an existing file, if the file has size zero,
502 # it should be considered as missing. Such zero-size files are
502 # it should be considered as missing. Such zero-size files are
503 # the result of truncation when a transaction is aborted.
503 # the result of truncation when a transaction is aborted.
504 notload = False
504 notload = False
505 if not notload:
505 if not notload:
506 self.fncache.add(path)
506 self.fncache.add(path)
507 return self.vfs(encoded, mode, *args, **kw)
507 return self.vfs(encoded, mode, *args, **kw)
508
508
509 def join(self, path):
509 def join(self, path):
510 if path:
510 if path:
511 return self.vfs.join(self.encode(path))
511 return self.vfs.join(self.encode(path))
512 else:
512 else:
513 return self.vfs.join(path)
513 return self.vfs.join(path)
514
514
515 class fncachestore(basicstore):
515 class fncachestore(basicstore):
516 def __init__(self, path, vfstype, dotencode):
516 def __init__(self, path, vfstype, dotencode):
517 if dotencode:
517 if dotencode:
518 encode = _pathencode
518 encode = _pathencode
519 else:
519 else:
520 encode = _plainhybridencode
520 encode = _plainhybridencode
521 self.encode = encode
521 self.encode = encode
522 vfs = vfstype(path + '/store')
522 vfs = vfstype(path + '/store')
523 self.path = vfs.base
523 self.path = vfs.base
524 self.pathsep = self.path + '/'
524 self.pathsep = self.path + '/'
525 self.createmode = _calcmode(vfs)
525 self.createmode = _calcmode(vfs)
526 vfs.createmode = self.createmode
526 vfs.createmode = self.createmode
527 self.rawvfs = vfs
527 self.rawvfs = vfs
528 fnc = fncache(vfs)
528 fnc = fncache(vfs)
529 self.fncache = fnc
529 self.fncache = fnc
530 self.vfs = _fncachevfs(vfs, fnc, encode)
530 self.vfs = _fncachevfs(vfs, fnc, encode)
531 self.opener = self.vfs
531 self.opener = self.vfs
532
532
533 def join(self, f):
533 def join(self, f):
534 return self.pathsep + self.encode(f)
534 return self.pathsep + self.encode(f)
535
535
536 def getsize(self, path):
536 def getsize(self, path):
537 return self.rawvfs.stat(path).st_size
537 return self.rawvfs.stat(path).st_size
538
538
539 def datafiles(self):
539 def datafiles(self):
540 for f in sorted(self.fncache):
540 for f in sorted(self.fncache):
541 ef = self.encode(f)
541 ef = self.encode(f)
542 try:
542 try:
543 yield f, ef, self.getsize(ef)
543 yield f, ef, self.getsize(ef)
544 except OSError as err:
544 except OSError as err:
545 if err.errno != errno.ENOENT:
545 if err.errno != errno.ENOENT:
546 raise
546 raise
547
547
548 def copylist(self):
548 def copylist(self):
549 d = ('data meta dh fncache phaseroots obsstore'
549 d = ('narrowspec data meta dh fncache phaseroots obsstore'
550 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
550 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
551 return (['requires', '00changelog.i'] +
551 return (['requires', '00changelog.i'] +
552 ['store/' + f for f in d.split()])
552 ['store/' + f for f in d.split()])
553
553
554 def write(self, tr):
554 def write(self, tr):
555 self.fncache.write(tr)
555 self.fncache.write(tr)
556
556
557 def invalidatecaches(self):
557 def invalidatecaches(self):
558 self.fncache.entries = None
558 self.fncache.entries = None
559
559
560 def markremoved(self, fn):
560 def markremoved(self, fn):
561 self.fncache.remove(fn)
561 self.fncache.remove(fn)
562
562
563 def _exists(self, f):
563 def _exists(self, f):
564 ef = self.encode(f)
564 ef = self.encode(f)
565 try:
565 try:
566 self.getsize(ef)
566 self.getsize(ef)
567 return True
567 return True
568 except OSError as err:
568 except OSError as err:
569 if err.errno != errno.ENOENT:
569 if err.errno != errno.ENOENT:
570 raise
570 raise
571 # nonexistent entry
571 # nonexistent entry
572 return False
572 return False
573
573
574 def __contains__(self, path):
574 def __contains__(self, path):
575 '''Checks if the store contains path'''
575 '''Checks if the store contains path'''
576 path = "/".join(("data", path))
576 path = "/".join(("data", path))
577 # check for files (exact match)
577 # check for files (exact match)
578 e = path + '.i'
578 e = path + '.i'
579 if e in self.fncache and self._exists(e):
579 if e in self.fncache and self._exists(e):
580 return True
580 return True
581 # now check for directories (prefix match)
581 # now check for directories (prefix match)
582 if not path.endswith('/'):
582 if not path.endswith('/'):
583 path += '/'
583 path += '/'
584 for e in self.fncache:
584 for e in self.fncache:
585 if e.startswith(path) and self._exists(e):
585 if e.startswith(path) and self._exists(e):
586 return True
586 return True
587 return False
587 return False
588
588
589 def store(requirements, path, vfstype):
589 def store(requirements, path, vfstype):
590 if 'store' in requirements:
590 if 'store' in requirements:
591 if 'fncache' in requirements:
591 if 'fncache' in requirements:
592 return fncachestore(path, vfstype, 'dotencode' in requirements)
592 return fncachestore(path, vfstype, 'dotencode' in requirements)
593 return encodedstore(path, vfstype)
593 return encodedstore(path, vfstype)
594 return basicstore(path, vfstype)
594 return basicstore(path, vfstype)
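A usage sketch for the factory above, opening an existing repository's store directly (assumes the current directory is the repository root):

    from mercurial import store as storemod, vfs as vfsmod

    requirements = set(open('.hg/requires').read().split())
    s = storemod.store(requirements, '.hg', vfsmod.vfs)
    for unencoded, encoded, size in s.walk():
        print(unencoded, size)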
@@ -1,43 +1,43 b''
1 $ . "$TESTDIR/narrow-library.sh"
1 $ . "$TESTDIR/narrow-library.sh"
2 $ hg init repo
2 $ hg init repo
3 $ cd repo
3 $ cd repo
4 $ cat << EOF > .hg/narrowspec
4 $ cat << EOF > .hg/store/narrowspec
5 > [include]
5 > [include]
6 > path:foo
6 > path:foo
7 > [exclude]
7 > [exclude]
8 > EOF
8 > EOF
9 $ echo treemanifest >> .hg/requires
9 $ echo treemanifest >> .hg/requires
10 $ echo narrowhg-experimental >> .hg/requires
10 $ echo narrowhg-experimental >> .hg/requires
11 $ mkdir -p foo/bar
11 $ mkdir -p foo/bar
12 $ echo b > foo/f
12 $ echo b > foo/f
13 $ echo c > foo/bar/f
13 $ echo c > foo/bar/f
14 $ hg commit -Am hi
14 $ hg commit -Am hi
15 adding foo/bar/f
15 adding foo/bar/f
16 adding foo/f
16 adding foo/f
17 $ hg debugindex -m
17 $ hg debugindex -m
18 rev linkrev nodeid p1 p2
18 rev linkrev nodeid p1 p2
19 0 0 14a5d056d75a 000000000000 000000000000
19 0 0 14a5d056d75a 000000000000 000000000000
20 $ hg debugindex --dir foo
20 $ hg debugindex --dir foo
21 rev linkrev nodeid p1 p2
21 rev linkrev nodeid p1 p2
22 0 0 e635c7857aef 000000000000 000000000000
22 0 0 e635c7857aef 000000000000 000000000000
23 $ hg debugindex --dir foo/
23 $ hg debugindex --dir foo/
24 rev linkrev nodeid p1 p2
24 rev linkrev nodeid p1 p2
25 0 0 e635c7857aef 000000000000 000000000000
25 0 0 e635c7857aef 000000000000 000000000000
26 $ hg debugindex --dir foo/bar
26 $ hg debugindex --dir foo/bar
27 rev linkrev nodeid p1 p2
27 rev linkrev nodeid p1 p2
28 0 0 e091d4224761 000000000000 000000000000
28 0 0 e091d4224761 000000000000 000000000000
29 $ hg debugindex --dir foo/bar/
29 $ hg debugindex --dir foo/bar/
30 rev linkrev nodeid p1 p2
30 rev linkrev nodeid p1 p2
31 0 0 e091d4224761 000000000000 000000000000
31 0 0 e091d4224761 000000000000 000000000000
32 $ hg debugdata -m 0
32 $ hg debugdata -m 0
33 foo\x00e635c7857aef92ac761ce5741a99da159abbbb24t (esc)
33 foo\x00e635c7857aef92ac761ce5741a99da159abbbb24t (esc)
34 $ hg debugdata --dir foo 0
34 $ hg debugdata --dir foo 0
35 bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
35 bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
36 f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
36 f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
37 $ hg debugdata --dir foo/ 0
37 $ hg debugdata --dir foo/ 0
38 bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
38 bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
39 f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
39 f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
40 $ hg debugdata --dir foo/bar 0
40 $ hg debugdata --dir foo/bar 0
41 f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
41 f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
42 $ hg debugdata --dir foo/bar/ 0
42 $ hg debugdata --dir foo/bar/ 0
43 f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
43 f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
@@ -1,175 +1,174 b''
1 $ . "$TESTDIR/narrow-library.sh"
1 $ . "$TESTDIR/narrow-library.sh"
2
2
3 $ hg init master
3 $ hg init master
4 $ cd master
4 $ cd master
5 $ cat >> .hg/hgrc <<EOF
5 $ cat >> .hg/hgrc <<EOF
6 > [narrow]
6 > [narrow]
7 > serveellipses=True
7 > serveellipses=True
8 > EOF
8 > EOF
9 $ for x in `$TESTDIR/seq.py 10`
9 $ for x in `$TESTDIR/seq.py 10`
10 > do
10 > do
11 > echo $x > "f$x"
11 > echo $x > "f$x"
12 > hg add "f$x"
12 > hg add "f$x"
13 > hg commit -m "Commit f$x"
13 > hg commit -m "Commit f$x"
14 > done
14 > done
15 $ cd ..
15 $ cd ..
16
16
17 narrow clone a couple files, f2 and f8
17 narrow clone a couple files, f2 and f8
18
18
19 $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8"
19 $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8"
20 requesting all changes
20 requesting all changes
21 adding changesets
21 adding changesets
22 adding manifests
22 adding manifests
23 adding file changes
23 adding file changes
24 added 5 changesets with 2 changes to 2 files
24 added 5 changesets with 2 changes to 2 files
25 new changesets *:* (glob)
25 new changesets *:* (glob)
26 updating to branch default
26 updating to branch default
27 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
27 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 $ cd narrow
28 $ cd narrow
29 $ ls
29 $ ls
30 f2
30 f2
31 f8
31 f8
32 $ cat f2 f8
32 $ cat f2 f8
33 2
33 2
34 8
34 8
35
35
36 $ cd ..
36 $ cd ..
37
37
38 change every upstream file twice
38 change every upstream file twice
39
39
40 $ cd master
40 $ cd master
41 $ for x in `$TESTDIR/seq.py 10`
41 $ for x in `$TESTDIR/seq.py 10`
42 > do
42 > do
43 > echo "update#1 $x" >> "f$x"
43 > echo "update#1 $x" >> "f$x"
44 > hg commit -m "Update#1 to f$x" "f$x"
44 > hg commit -m "Update#1 to f$x" "f$x"
45 > done
45 > done
46 $ for x in `$TESTDIR/seq.py 10`
46 $ for x in `$TESTDIR/seq.py 10`
47 > do
47 > do
48 > echo "update#2 $x" >> "f$x"
48 > echo "update#2 $x" >> "f$x"
49 > hg commit -m "Update#2 to f$x" "f$x"
49 > hg commit -m "Update#2 to f$x" "f$x"
50 > done
50 > done
51 $ cd ..
51 $ cd ..
52
52
53 look for incoming changes
53 look for incoming changes
54
54
55 $ cd narrow
55 $ cd narrow
56 $ hg incoming --limit 3
56 $ hg incoming --limit 3
57 comparing with ssh://user@dummy/master
57 comparing with ssh://user@dummy/master
58 searching for changes
58 searching for changes
59 changeset: 5:ddc055582556
59 changeset: 5:ddc055582556
60 user: test
60 user: test
61 date: Thu Jan 01 00:00:00 1970 +0000
61 date: Thu Jan 01 00:00:00 1970 +0000
62 summary: Update#1 to f1
62 summary: Update#1 to f1
63
63
64 changeset: 6:f66eb5ad621d
64 changeset: 6:f66eb5ad621d
65 user: test
65 user: test
66 date: Thu Jan 01 00:00:00 1970 +0000
66 date: Thu Jan 01 00:00:00 1970 +0000
67 summary: Update#1 to f2
67 summary: Update#1 to f2
68
68
69 changeset: 7:c42ecff04e99
69 changeset: 7:c42ecff04e99
70 user: test
70 user: test
71 date: Thu Jan 01 00:00:00 1970 +0000
71 date: Thu Jan 01 00:00:00 1970 +0000
72 summary: Update#1 to f3
72 summary: Update#1 to f3
73
73
74
74
75 Interrupting the pull is safe
75 Interrupting the pull is safe
76 $ hg --config hooks.pretxnchangegroup.bad=false pull -q
76 $ hg --config hooks.pretxnchangegroup.bad=false pull -q
77 transaction abort!
77 transaction abort!
78 rollback completed
78 rollback completed
79 abort: pretxnchangegroup.bad hook exited with status 1
79 abort: pretxnchangegroup.bad hook exited with status 1
80 [255]
80 [255]
81 $ hg id
81 $ hg id
82 223311e70a6f tip
82 223311e70a6f tip
83
83
pull new changes down to the narrow clone. Should get 9 new changesets: 4
relevant to the narrow spec, and 5 ellipsis nodes gluing them all together.

  $ hg pull
  pulling from ssh://user@dummy/master
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 4 changes to 2 files
  new changesets *:* (glob)
  (run 'hg update' to get a working copy)
  $ hg log -T '{rev}: {desc}\n'
  13: Update#2 to f10
  12: Update#2 to f8
  11: Update#2 to f7
  10: Update#2 to f2
  9: Update#2 to f1
  8: Update#1 to f8
  7: Update#1 to f7
  6: Update#1 to f2
  5: Update#1 to f1
  4: Commit f10
  3: Commit f8
  2: Commit f7
  1: Commit f2
  0: Commit f1
  $ hg update tip
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

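In the log above, only the f2 and f8 revisions carry file changes visible to
this clone; the f1, f7, and f10 revisions are the ellipsis nodes gluing the
graph together. The narrow extension also registers an ellipsis() revset (and
{ellipsis}/{outsidenarrow} template keywords), so the glue nodes could in
principle be listed directly; a hedged sketch, with output omitted since this
test does not exercise it:

  $ hg log -r 'ellipsis()' -T '{rev}: {desc}\n'
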
add a change and push it

  $ echo "update#3 2" >> f2
  $ hg commit -m "Update#3 to f2" f2
  $ hg log f2 -T '{rev}: {desc}\n'
  14: Update#3 to f2
  10: Update#2 to f2
  6: Update#1 to f2
  1: Commit f2
  $ hg push
  pushing to ssh://user@dummy/master
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  $ cd ..

  $ cd master
  $ hg log f2 -T '{rev}: {desc}\n'
  30: Update#3 to f2
  21: Update#2 to f2
  11: Update#1 to f2
  1: Commit f2
  $ hg log -l 3 -T '{rev}: {desc}\n'
  30: Update#3 to f2
  29: Update#2 to f10
  28: Update#2 to f9

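Note that the same commit is rev 14 in the narrow clone but rev 30 on master:
local revision numbers diverge because the narrow clone's changelog replaces
most of the server's history with a few ellipsis nodes. Cross-repository
comparisons should therefore use node hashes rather than revision numbers; a
hedged sketch, run from the parent directory (output elided):

  $ hg -R narrow log -r 'desc("Update#3 to f2")' -T '{node|short}\n'
  $ hg -R master log -r 'desc("Update#3 to f2")' -T '{node|short}\n'
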
Can pull into repo with a single commit (note that the pull of r1 currently
aborts on the remote side; the expected output below captures that behavior)

  $ cd ..
  $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
  $ cd narrow2
  $ hg pull -q -r 1
  transaction abort!
  rollback completed
  abort: pull failed on remote
  [255]

Can use 'hg share':
  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share=
  > EOF

  $ cd ..
  $ hg share narrow2 narrow2-share
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd narrow2-share
  $ hg status

We should also be able to unshare without breaking everything:
  $ hg unshare
  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  1 files, 1 changesets, 1 total revisions
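
The unshared repository now owns a full store of its own, including the
narrow patterns, which live at .hg/store/narrowspec. A hedged sketch of
checking them for this clone (serialization illustrative, not part of the
test's verified output):

  $ cat .hg/store/narrowspec
  [include]
  path:f1
  [exclude]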