localrepo: run cache-warming transaction callback before report callback...
Martin von Zweigbergk
r35767:3a3b59bb default
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
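
# Editorial usage sketch (not part of the original module): isfilecached()
# lets callers inspect a filecache-ed property without forcing a load while
# it is still cold. '_bookmarks' is a property this module registers below:
#
#   bookmarks, cached = isfilecached(repo, '_bookmarks')
#   if cached:
#       pass  # reuse the in-memory bmstore without re-reading from disk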

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
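
# Editorial usage sketch (not part of the original module): a hypothetical
# subclass method decorated with unfilteredmethod always executes against
# the unfiltered repository, whatever view it was called on:
#
#   class myrepo(localrepository):
#       @unfilteredmethod
#       def rebuildcaches(self):
#           ...  # 'self' here is guaranteed to be repo.unfiltered()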

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
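
    # Editorial note (not part of the original module): checkvfs() only
    # wraps self.vfs.audit when one of the devel knobs checked in __init__
    # above is set, e.g. in an hgrc used during development:
    #
    #   [devel]
    #   all-warnings = True
    #   check-locks = True
    #
    # Unlocked writes then surface as develwarn messages instead of passing
    # silently.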

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #  $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
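
    # Editorial usage sketch (not part of the original module):
    #
    #   served = repo.filtered('served')  # view without secret/hidden csets
    #   unfi = repo.unfiltered()          # complete, unfiltered view
    #
    # 'served' is one of the standard repoview filter names.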

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
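
    # Editorial usage sketch (not part of the original module):
    #
    #   repo[None]   # workingctx for the working directory
    #   repo['tip']  # changectx looked up by symbolic name
    #   repo[0:3]    # list of changectxs, skipping filtered revisions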

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
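
    # Editorial usage sketch (not part of the original module): the
    # %-escapes keep caller-supplied values from being parsed as revset
    # syntax, e.g.
    #
    #   for rev in repo.revs('ancestors(%d) and not public()', 42):
    #       pass  # rev is an integer revision number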

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
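
    # Editorial usage sketch (not part of the original module): a local
    # alias shadows a same-named user alias for just this call, e.g.
    #
    #   revs = repo.anyrevs(['heads(mine())'], user=True,
    #                       localalias={'mine': 'author("alice")'})
    #
    # where 'mine' and the author name are illustrative only.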
815
815
816 def url(self):
816 def url(self):
817 return 'file:' + self.root
817 return 'file:' + self.root
818
818
819 def hook(self, name, throw=False, **args):
819 def hook(self, name, throw=False, **args):
820 """Call a hook, passing this repo instance.
820 """Call a hook, passing this repo instance.
821
821
822 This a convenience method to aid invoking hooks. Extensions likely
822 This a convenience method to aid invoking hooks. Extensions likely
823 won't call this unless they have registered a custom hook or are
823 won't call this unless they have registered a custom hook or are
824 replacing code that is expected to call a hook.
824 replacing code that is expected to call a hook.
825 """
825 """
826 return hook.hook(self.ui, self, name, throw, **args)
826 return hook.hook(self.ui, self, name, throw, **args)
827
827
828 @filteredpropertycache
828 @filteredpropertycache
829 def _tagscache(self):
829 def _tagscache(self):
830 '''Returns a tagscache object that contains various tags related
830 '''Returns a tagscache object that contains various tags related
831 caches.'''
831 caches.'''
832
832
833 # This simplifies its cache management by having one decorated
833 # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

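        # note: _findtags() adds 'tip' to the tags mapping but not to
        # tagtypes, so tagtype('tip') returns None even though 'tip'
        # appears in tags()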
        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
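        # e.g. repo.branchmap().get('default', []) would list the head nodes
        # of the 'default' branch, ending with the highest-revision head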
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

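        # old-style conditional below: use the remote repo for the lookup
        # only if it is local; otherwise fall back to this repo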
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

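    # a sketch of the contract: known() reports, for each queried node,
    # whether it exists in this repository and is not filtered out, e.g.
    # repo.known([tipnode, unknownnode]) would yield [True, False]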
    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
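        # e.g. a server keeps changesets in the draft phase by setting, in
        # its hgrc:
        #
        #   [phases]
        #   publish = False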
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

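    # _loadfilter reads filter patterns from a config section such as
    # [encode] or [decode]; an illustrative hgrc (in the style of the
    # hgrc documentation):
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = pipe: gzip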
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        Returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
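        # 'flags' uses the manifest flag characters: 'l' marks a symlink,
        # 'x' marks an executable file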
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
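        # Typical caller pattern (a sketch; 'my-operation' is caller-chosen
        # and the store lock must already be held):
        #
        #   with repo.lock():
        #       tr = repo.transaction('my-operation')
        #       try:
        #           ...  # write to the store
        #           tr.close()
        #       finally:
        #           tr.release()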
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we do it only once,
                # building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
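        # e.g. (illustrative) an hgrc can veto the whole transaction through
        # one of the pretxn* hooks run above; a non-zero exit aborts it:
        #
        #   [hooks]
        #   pretxnclose.check-heads = /path/to/check.sh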
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))
1495
1499
1496 def invalidatecaches(self):
1500 def invalidatecaches(self):
1497
1501
1498 if '_tagscache' in vars(self):
1502 if '_tagscache' in vars(self):
1499 # can't use delattr on proxy
1503 # can't use delattr on proxy
1500 del self.__dict__['_tagscache']
1504 del self.__dict__['_tagscache']
1501
1505
1502 self.unfiltered()._branchcaches.clear()
1506 self.unfiltered()._branchcaches.clear()
1503 self.invalidatevolatilesets()
1507 self.invalidatevolatilesets()
1504 self._sparsesignaturecache.clear()
1508 self._sparsesignaturecache.clear()
1505
1509
1506 def invalidatevolatilesets(self):
1510 def invalidatevolatilesets(self):
1507 self.filteredrevcache.clear()
1511 self.filteredrevcache.clear()
1508 obsolete.clearobscaches(self)
1512 obsolete.clearobscaches(self)
1509
1513
1510 def invalidatedirstate(self):
1514 def invalidatedirstate(self):
1511 '''Invalidates the dirstate, causing the next call to dirstate
1515 '''Invalidates the dirstate, causing the next call to dirstate
1512 to check if it was modified since the last time it was read,
1516 to check if it was modified since the last time it was read,
1513 rereading it if it has.
1517 rereading it if it has.
1514
1518
1515 This is different to dirstate.invalidate() that it doesn't always
1519 This is different to dirstate.invalidate() that it doesn't always
1516 rereads the dirstate. Use dirstate.invalidate() if you want to
1520 rereads the dirstate. Use dirstate.invalidate() if you want to
1517 explicitly read the dirstate again (i.e. restoring it to a previous
1521 explicitly read the dirstate again (i.e. restoring it to a previous
1518 known good state).'''
1522 known good state).'''
1519 if hasunfilteredcache(self, 'dirstate'):
1523 if hasunfilteredcache(self, 'dirstate'):
1520 for k in self.dirstate._filecache:
1524 for k in self.dirstate._filecache:
1521 try:
1525 try:
1522 delattr(self.dirstate, k)
1526 delattr(self.dirstate, k)
1523 except AttributeError:
1527 except AttributeError:
1524 pass
1528 pass
1525 delattr(self.unfiltered(), 'dirstate')
1529 delattr(self.unfiltered(), 'dirstate')
1526
1530
1527 def invalidate(self, clearfilecache=False):
1531 def invalidate(self, clearfilecache=False):
1528 '''Invalidates both store and non-store parts other than dirstate
1532 '''Invalidates both store and non-store parts other than dirstate
1529
1533
1530 If a transaction is running, invalidation of store is omitted,
1534 If a transaction is running, invalidation of store is omitted,
1531 because discarding in-memory changes might cause inconsistency
1535 because discarding in-memory changes might cause inconsistency
1532 (e.g. incomplete fncache causes unintentional failure, but
1536 (e.g. incomplete fncache causes unintentional failure, but
1533 redundant one doesn't).
1537 redundant one doesn't).
1534 '''
1538 '''
1535 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1539 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1536 for k in list(self._filecache.keys()):
1540 for k in list(self._filecache.keys()):
1537 # dirstate is invalidated separately in invalidatedirstate()
1541 # dirstate is invalidated separately in invalidatedirstate()
1538 if k == 'dirstate':
1542 if k == 'dirstate':
1539 continue
1543 continue
1540 if (k == 'changelog' and
1544 if (k == 'changelog' and
1541 self.currenttransaction() and
1545 self.currenttransaction() and
1542 self.changelog._delayed):
1546 self.changelog._delayed):
1543 # The changelog object may store unwritten revisions. We don't
1547 # The changelog object may store unwritten revisions. We don't
1544 # want to lose them.
1548 # want to lose them.
1545 # TODO: Solve the problem instead of working around it.
1549 # TODO: Solve the problem instead of working around it.
1546 continue
1550 continue
1547
1551
1548 if clearfilecache:
1552 if clearfilecache:
1549 del self._filecache[k]
1553 del self._filecache[k]
1550 try:
1554 try:
1551 delattr(unfiltered, k)
1555 delattr(unfiltered, k)
1552 except AttributeError:
1556 except AttributeError:
1553 pass
1557 pass
1554 self.invalidatecaches()
1558 self.invalidatecaches()
1555 if not self.currenttransaction():
1559 if not self.currenttransaction():
1556 # TODO: Changing contents of store outside transaction
1560 # TODO: Changing contents of store outside transaction
1557 # causes inconsistency. We should make in-memory store
1561 # causes inconsistency. We should make in-memory store
1558 # changes detectable, and abort if changed.
1562 # changes detectable, and abort if changed.
1559 self.store.invalidatecaches()
1563 self.store.invalidatecaches()
1560
1564
1561 def invalidateall(self):
1565 def invalidateall(self):
1562 '''Fully invalidates both store and non-store parts, causing the
1566 '''Fully invalidates both store and non-store parts, causing the
1563 subsequent operation to reread any outside changes.'''
1567 subsequent operation to reread any outside changes.'''
1564 # extension should hook this to invalidate its caches
1568 # extension should hook this to invalidate its caches
1565 self.invalidate()
1569 self.invalidate()
1566 self.invalidatedirstate()
1570 self.invalidatedirstate()
1567
1571
1568 @unfilteredmethod
1572 @unfilteredmethod
1569 def _refreshfilecachestats(self, tr):
1573 def _refreshfilecachestats(self, tr):
1570 """Reload stats of cached files so that they are flagged as valid"""
1574 """Reload stats of cached files so that they are flagged as valid"""
1571 for k, ce in self._filecache.items():
1575 for k, ce in self._filecache.items():
1572 if k == 'dirstate' or k not in self.__dict__:
1576 if k == 'dirstate' or k not in self.__dict__:
1573 continue
1577 continue
1574 ce.refresh()
1578 ce.refresh()
1575
1579
1576 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1580 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1577 inheritchecker=None, parentenvvar=None):
1581 inheritchecker=None, parentenvvar=None):
1578 parentlock = None
1582 parentlock = None
1579 # the contents of parentenvvar are used by the underlying lock to
1583 # the contents of parentenvvar are used by the underlying lock to
1580 # determine whether it can be inherited
1584 # determine whether it can be inherited
1581 if parentenvvar is not None:
1585 if parentenvvar is not None:
1582 parentlock = encoding.environ.get(parentenvvar)
1586 parentlock = encoding.environ.get(parentenvvar)
1583
1587
1584 timeout = 0
1588 timeout = 0
1585 warntimeout = 0
1589 warntimeout = 0
1586 if wait:
1590 if wait:
1587 timeout = self.ui.configint("ui", "timeout")
1591 timeout = self.ui.configint("ui", "timeout")
1588 warntimeout = self.ui.configint("ui", "timeout.warn")
1592 warntimeout = self.ui.configint("ui", "timeout.warn")
1589
1593
1590 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1594 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1591 releasefn=releasefn,
1595 releasefn=releasefn,
1592 acquirefn=acquirefn, desc=desc,
1596 acquirefn=acquirefn, desc=desc,
1593 inheritchecker=inheritchecker,
1597 inheritchecker=inheritchecker,
1594 parentlock=parentlock)
1598 parentlock=parentlock)
1595 return l
1599 return l
1596
1600
1597 def _afterlock(self, callback):
1601 def _afterlock(self, callback):
1598 """add a callback to be run when the repository is fully unlocked
1602 """add a callback to be run when the repository is fully unlocked
1599
1603
1600 The callback will be executed when the outermost lock is released
1604 The callback will be executed when the outermost lock is released
1601 (with wlock being higher level than 'lock')."""
1605 (with wlock being higher level than 'lock')."""
1602 for ref in (self._wlockref, self._lockref):
1606 for ref in (self._wlockref, self._lockref):
1603 l = ref and ref()
1607 l = ref and ref()
1604 if l and l.held:
1608 if l and l.held:
1605 l.postrelease.append(callback)
1609 l.postrelease.append(callback)
1606 break
1610 break
1607 else: # no lock have been found.
1611 else: # no lock have been found.
1608 callback()
1612 callback()
1609
1613
1610 def lock(self, wait=True):
1614 def lock(self, wait=True):
1611 '''Lock the repository store (.hg/store) and return a weak reference
1615 '''Lock the repository store (.hg/store) and return a weak reference
1612 to the lock. Use this before modifying the store (e.g. committing or
1616 to the lock. Use this before modifying the store (e.g. committing or
1613 stripping). If you are opening a transaction, get a lock as well.)
1617 stripping). If you are opening a transaction, get a lock as well.)
1614
1618
1615 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1619 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1616 'wlock' first to avoid a dead-lock hazard.'''
1620 'wlock' first to avoid a dead-lock hazard.'''
1617 l = self._currentlock(self._lockref)
1621 l = self._currentlock(self._lockref)
1618 if l is not None:
1622 if l is not None:
1619 l.lock()
1623 l.lock()
1620 return l
1624 return l
1621
1625
1622 l = self._lock(self.svfs, "lock", wait, None,
1626 l = self._lock(self.svfs, "lock", wait, None,
1623 self.invalidate, _('repository %s') % self.origroot)
1627 self.invalidate, _('repository %s') % self.origroot)
1624 self._lockref = weakref.ref(l)
1628 self._lockref = weakref.ref(l)
1625 return l
1629 return l
1626
1630
1627 def _wlockchecktransaction(self):
1631 def _wlockchecktransaction(self):
1628 if self.currenttransaction() is not None:
1632 if self.currenttransaction() is not None:
1629 raise error.LockInheritanceContractViolation(
1633 raise error.LockInheritanceContractViolation(
1630 'wlock cannot be inherited in the middle of a transaction')
1634 'wlock cannot be inherited in the middle of a transaction')
1631
1635
1632 def wlock(self, wait=True):
1636 def wlock(self, wait=True):
1633 '''Lock the non-store parts of the repository (everything under
1637 '''Lock the non-store parts of the repository (everything under
1634 .hg except .hg/store) and return a weak reference to the lock.
1638 .hg except .hg/store) and return a weak reference to the lock.
1635
1639
1636 Use this before modifying files in .hg.
1640 Use this before modifying files in .hg.
1637
1641
1638 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1642 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1639 'wlock' first to avoid a dead-lock hazard.'''
1643 'wlock' first to avoid a dead-lock hazard.'''
1640 l = self._wlockref and self._wlockref()
1644 l = self._wlockref and self._wlockref()
1641 if l is not None and l.held:
1645 if l is not None and l.held:
1642 l.lock()
1646 l.lock()
1643 return l
1647 return l
1644
1648
1645 # We do not need to check for non-waiting lock acquisition. Such
1649 # We do not need to check for non-waiting lock acquisition. Such
1646 # acquisition would not cause dead-lock as they would just fail.
1650 # acquisition would not cause dead-lock as they would just fail.
1647 if wait and (self.ui.configbool('devel', 'all-warnings')
1651 if wait and (self.ui.configbool('devel', 'all-warnings')
1648 or self.ui.configbool('devel', 'check-locks')):
1652 or self.ui.configbool('devel', 'check-locks')):
1649 if self._currentlock(self._lockref) is not None:
1653 if self._currentlock(self._lockref) is not None:
1650 self.ui.develwarn('"wlock" acquired after "lock"')
1654 self.ui.develwarn('"wlock" acquired after "lock"')
1651
1655
1652 def unlock():
1656 def unlock():
1653 if self.dirstate.pendingparentchange():
1657 if self.dirstate.pendingparentchange():
1654 self.dirstate.invalidate()
1658 self.dirstate.invalidate()
1655 else:
1659 else:
1656 self.dirstate.write(None)
1660 self.dirstate.write(None)
1657
1661
1658 self._filecache['dirstate'].refresh()
1662 self._filecache['dirstate'].refresh()
1659
1663
1660 l = self._lock(self.vfs, "wlock", wait, unlock,
1664 l = self._lock(self.vfs, "wlock", wait, unlock,
1661 self.invalidatedirstate, _('working directory of %s') %
1665 self.invalidatedirstate, _('working directory of %s') %
1662 self.origroot,
1666 self.origroot,
1663 inheritchecker=self._wlockchecktransaction,
1667 inheritchecker=self._wlockchecktransaction,
1664 parentenvvar='HG_WLOCK_LOCKER')
1668 parentenvvar='HG_WLOCK_LOCKER')
1665 self._wlockref = weakref.ref(l)
1669 self._wlockref = weakref.ref(l)
1666 return l
1670 return l
1667
1671
1668 def _currentlock(self, lockref):
1672 def _currentlock(self, lockref):
1669 """Returns the lock if it's held, or None if it's not."""
1673 """Returns the lock if it's held, or None if it's not."""
1670 if lockref is None:
1674 if lockref is None:
1671 return None
1675 return None
1672 l = lockref()
1676 l = lockref()
1673 if l is None or not l.held:
1677 if l is None or not l.held:
1674 return None
1678 return None
1675 return l
1679 return l
1676
1680
1677 def currentwlock(self):
1681 def currentwlock(self):
1678 """Returns the wlock if it's held, or None if it's not."""
1682 """Returns the wlock if it's held, or None if it's not."""
1679 return self._currentlock(self._wlockref)
1683 return self._currentlock(self._wlockref)
1680
1684
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

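    # Illustrative sketch of the metadata written above for a rename (an
    # editor's illustration; assumes 'foo' was renamed to 'bar' and that
    # 'foo' is at filelog node crev in the first parent): the new 'bar'
    # revision is stored with
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #     fparent1, fparent2 = nullid, newfparent
    #
    # so the copy source is reached through the metadata rather than
    # through the first filelog parent.
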
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepo.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may already have been stripped before
            # the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

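    # Illustrative sketch of calling commit() programmatically (an editor's
    # illustration; assumes 'repo' is a localrepository and 'm' is a matcher
    # built with matchmod.match()):
    #
    #     node = repo.commit(text='fix the frobnicator', user='alice', match=m)
    #     if node is None:
    #         repo.ui.status('nothing changed\n')  # empty commit was refused
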
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

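    # Illustrative sketch of registering a post-dirstate-status callback (an
    # editor's illustration; assumes 'repo' is a localrepository). Per the
    # docstring above, the callback re-reads the dirstate through wctx rather
    # than caching it:
    #
    #     def fixup(wctx, status):
    #         dirstate = wctx.repo().dirstate  # never a cached copy
    #         for f in status.modified:
    #             pass  # e.g. refresh an extension's external cache for f
    #
    #     repo.addpostdsstatus(fixup)
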
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

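    # Illustrative usage sketch (an editor's illustration; assumes 'repo' is
    # available): list the open heads of a named branch, newest first, as
    # documented above, using 'short' as imported in this module:
    #
    #     for node in repo.branchheads('default'):
    #         repo.ui.write('%s\n' % short(node))
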
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

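    # Worked example for between(): the node is recorded only when i == f,
    # and f doubles on each hit, so walking first parents from 'top' records
    # the nodes at distances 1, 2, 4, 8, ... until 'bottom' (or nullid) is
    # reached. The wire protocol thus gets a logarithmic sample of the range
    # instead of every node in it.
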
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying the repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

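    # Illustrative sketch of a Python 'prepushkey' hook consuming the
    # hookargs assembled above (an editor's illustration; the bookmark policy
    # is hypothetical). A truthy return value makes this throw=True hook
    # abort the operation:
    #
    #     def prepushkey(ui, repo, namespace=None, key=None, old=None,
    #                    new=None, **kwargs):
    #         if namespace == 'bookmarks' and not key.startswith('review/'):
    #             ui.warn('only review/* bookmarks may be pushed\n')
    #             return True   # abort
    #         return False      # allow
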
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

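# For example, undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots': only the first 'journal' in the basename is
# replaced, and the assert guards against unexpected file names.
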
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
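
# Illustrative sketch of an extension wrapping newreporequirements(), as the
# docstring above suggests (an editor's illustration; the 'exp-myfeature'
# requirement and the module layout are hypothetical):
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         requirements = orig(repo)
#         requirements.add('exp-myfeature')
#         return requirements
#
#     def extsetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)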
@@ -1,176 +1,176 @@
Test changesets filtering during exchanges (some tests are still in
test-obsolete.t)

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > evolution.createmarkers=True
  > EOF

Push does not corrupt remote
----------------------------

Create a DAG where a changeset reuses a revision from a file first used in an
extinct changeset.

  $ hg init local
  $ cd local
  $ echo 'base' > base
  $ hg commit -Am base
  adding base
  $ echo 'A' > A
  $ hg commit -Am A
  adding A
  $ hg up 0
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg revert -ar 1
  adding A
  $ hg commit -Am "A'"
  created new head
  $ hg log -G --template='{desc} {node}'
  @ A' f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
  |
  | o A 9d73aac1b2ed7d53835eaeec212ed41ea47da53a
  |/
  o base d20a80d4def38df63a4b330b7fb688f3d4cae1e3

  $ hg debugobsolete 9d73aac1b2ed7d53835eaeec212ed41ea47da53a f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
  obsoleted 1 changesets

Push it. The bundle should not refer to the extinct changeset.

  $ hg init ../other
  $ hg push ../other
  pushing to ../other
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  $ hg -R ../other verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  2 files, 2 changesets, 2 total revisions

Adding a changeset going extinct locally
------------------------------------------

Pull a changeset that will immediately go extinct (because you already have a
marker obsoleting it)
(test resolution of issue3788)

  $ hg phase --draft --force f89bcc95eba5
  $ hg phase -R ../other --draft --force f89bcc95eba5
  $ hg commit --amend -m "A''"
  $ hg --hidden --config extensions.mq= strip --no-backup f89bcc95eba5
  $ hg pull ../other
  pulling from ../other
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 0 changes to 1 files (+1 heads)
  1 new phase-divergent changesets
  new changesets f89bcc95eba5
  (run 'hg heads' to see heads, 'hg merge' to merge)

check that bundle is not affected

  $ hg bundle --hidden --rev f89bcc95eba5 --base "f89bcc95eba5^" ../f89bcc95eba5.hg
  1 changesets found
  $ hg --hidden --config extensions.mq= strip --no-backup f89bcc95eba5
  $ hg unbundle ../f89bcc95eba5.hg
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 0 changes to 1 files (+1 heads)
  (run 'hg heads' to see heads)

check that bundle can contain markers:

  $ hg bundle --hidden --rev f89bcc95eba5 --base "f89bcc95eba5^" ../f89bcc95eba5-obs.hg --config experimental.evolution.bundle-obsmarker=1
  1 changesets found
  $ hg debugbundle ../f89bcc95eba5.hg
  Stream params: {Compression: BZ}
  changegroup -- {nbchanges: 1, version: 02}
  f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
  $ hg debugbundle ../f89bcc95eba5-obs.hg
  Stream params: {Compression: BZ}
  changegroup -- {nbchanges: 1, version: 02}
  f89bcc95eba5174b1ccc3e33a82e84c96e8338ee
  obsmarkers -- {}
  version: 1 (70 bytes)
  9d73aac1b2ed7d53835eaeec212ed41ea47da53a f89bcc95eba5174b1ccc3e33a82e84c96e8338ee 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}

  $ cd ..

pull does not fetch excessive changesets when common node is hidden (issue4982)
-------------------------------------------------------------------------------

initial repo with server and client matching

  $ hg init pull-hidden-common
  $ cd pull-hidden-common
  $ touch foo
  $ hg -q commit -A -m initial
  $ echo 1 > foo
  $ hg commit -m 1
  $ echo 2a > foo
  $ hg commit -m 2a
  $ cd ..
  $ hg clone --pull pull-hidden-common pull-hidden-common-client
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 3 changes to 1 files
  new changesets 96ee1d7354c4:6a29ed9c68de
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

server obsoletes the old head

  $ cd pull-hidden-common
  $ hg -q up -r 1
  $ echo 2b > foo
  $ hg -q commit -m 2b
  $ hg debugobsolete 6a29ed9c68defff1a139e5c6fa9696fb1a75783d bec0734cd68e84477ba7fc1d13e6cff53ab70129
  obsoleted 1 changesets
  $ cd ..

client only pulls down 1 changeset

  $ cd pull-hidden-common-client
  $ hg pull --debug
  pulling from $TESTTMP/pull-hidden-common
  query 1; heads
  searching for changes
  taking quick initial sample
  query 2; still undecided: 2, sample size is: 2
  2 total queries in *.????s (glob)
  1 changesets found
  list of changesets:
  bec0734cd68e84477ba7fc1d13e6cff53ab70129
  listing keys for "bookmarks"
  bundle2-output-bundle: "HG20", 3 parts total
  bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
  bundle2-output-part: "listkeys" (params: 1 mandatory) empty payload
  bundle2-output-part: "phase-heads" 24 bytes payload
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
  adding changesets
  add changeset bec0734cd68e
  adding manifests
  adding file changes
  adding foo revisions
  added 1 changesets with 1 changes to 1 files (+1 heads)
  bundle2-input-part: total payload size 476
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-part: "phase-heads" supported
  bundle2-input-part: total payload size 24
  bundle2-input-bundle: 2 parts total
  checking for updated bookmarks
- updating the branch cache
  new changesets bec0734cd68e
+ updating the branch cache
  (run 'hg heads' to see heads, 'hg merge' to merge)