hg: don't reuse repo instance after unshare()...
Gregory Szorc
r39642:c5e6c1ba default
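
The practical consequence for callers: unshare() now returns the new repository object and poisons the instance that was passed in, so call sites must rebind instead of continuing to use the old object. A minimal sketch of the new calling convention (the ui setup and path below are illustrative, not part of this change):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    # hypothetical path to an existing shared clone
    repo = hg.repository(ui, b'/path/to/shared/clone')

    # Before this change, hg.unshare(ui, repo) re-ran __init__() on the
    # existing instance and callers kept using `repo`. Now the passed
    # instance is poisoned via localrepo.poisonrepository(), so rebind to
    # the repository object that unshare() returns:
    repo = hg.unshare(ui, repo)
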
@@ -1,1231 +1,1242 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    narrowspec,
    node,
    phases,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

from .utils import (
    stringutil,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    ui = getattr(obj, "ui", ui)
    if ui.configbool('devel', 'debug.extensions'):
        log = lambda msg, *values: ui.debug('debug.extensions: ',
            msg % values, label='debug.extensions')
    else:
        log = lambda *a, **kw: None
    for f in presetupfuncs or []:
        f(ui, obj)
    log('- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            log('  - running reposetup for %s\n' % (name,))
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                log('  > reposetup for %r took %s\n', name, stats)
    log('> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.makedirs()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=stringutil.forcebytestr(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
+
+    Returns a new repository object representing the unshared repository.
+
+    The passed repository object is not usable after this function is
+    called.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

-    # update store, spath, svfs and sjoin of repo
-    repo.unfiltered().__init__(repo.baseui, repo.root)
+    # Removing share changes some fundamental properties of the repo instance.
+    # So we instantiate a new repo object and operate on it rather than
+    # try to keep the existing repo usable.
+    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
-    c = repo['.']
+    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

+    localrepo.poisonrepository(repo)
+
+    return newrepo
+
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

    with destrepo.wlock():
        if bookmarks:
            destrepo.vfs.write('shared', sharedbookmarks + '\n')

def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None,
          storeincludepats=None, storeexcludepats=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {'path:.'}

        createopts['narrowfiles'] = True

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_('(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                 )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

840 def _showstats(repo, stats, quietempty=False):
851 def _showstats(repo, stats, quietempty=False):
841 if quietempty and stats.isempty():
852 if quietempty and stats.isempty():
842 return
853 return
843 repo.ui.status(_("%d files updated, %d files merged, "
854 repo.ui.status(_("%d files updated, %d files merged, "
844 "%d files removed, %d files unresolved\n") % (
855 "%d files removed, %d files unresolved\n") % (
845 stats.updatedcount, stats.mergedcount,
856 stats.updatedcount, stats.mergedcount,
846 stats.removedcount, stats.unresolvedcount))
857 stats.removedcount, stats.unresolvedcount))
847
858
848 def updaterepo(repo, node, overwrite, updatecheck=None):
859 def updaterepo(repo, node, overwrite, updatecheck=None):
849 """Update the working directory to node.
860 """Update the working directory to node.
850
861
851 When overwrite is set, changes are clobbered, merged else
862 When overwrite is set, changes are clobbered, merged else
852
863
853 returns stats (see pydoc mercurial.merge.applyupdates)"""
864 returns stats (see pydoc mercurial.merge.applyupdates)"""
854 return mergemod.update(repo, node, False, overwrite,
865 return mergemod.update(repo, node, False, overwrite,
855 labels=['working copy', 'destination'],
866 labels=['working copy', 'destination'],
856 updatecheck=updatecheck)
867 updatecheck=updatecheck)
857
868
858 def update(repo, node, quietempty=False, updatecheck=None):
869 def update(repo, node, quietempty=False, updatecheck=None):
859 """update the working directory to node"""
870 """update the working directory to node"""
860 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
871 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
861 _showstats(repo, stats, quietempty)
872 _showstats(repo, stats, quietempty)
862 if stats.unresolvedcount:
873 if stats.unresolvedcount:
863 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
874 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
864 return stats.unresolvedcount > 0
875 return stats.unresolvedcount > 0
865
876
866 # naming conflict in clone()
877 # naming conflict in clone()
867 _update = update
878 _update = update
868
879
869 def clean(repo, node, show_stats=True, quietempty=False):
880 def clean(repo, node, show_stats=True, quietempty=False):
870 """forcibly switch the working directory to node, clobbering changes"""
881 """forcibly switch the working directory to node, clobbering changes"""
871 stats = updaterepo(repo, node, True)
882 stats = updaterepo(repo, node, True)
872 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
883 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
873 if show_stats:
884 if show_stats:
874 _showstats(repo, stats, quietempty)
885 _showstats(repo, stats, quietempty)
875 return stats.unresolvedcount > 0
886 return stats.unresolvedcount > 0
876
887
877 # naming conflict in updatetotally()
888 # naming conflict in updatetotally()
878 _clean = clean
889 _clean = clean
879
890
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
              changes into destination
    * noconflict: check that the update does not result in file merges

    This returns whether a conflict was detected while updating.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

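# Illustrative sketch (not part of this module): how the updatecheck policy
# above plays into a call to updatetotally(). Path and config values here are
# hypothetical.
#
#   from mercurial import ui as uimod, hg
#
#   myui = uimod.ui.load()
#   # same effect as '[commands] update.check = noconflict' in an hgrc
#   myui.setconfig('commands', 'update.check', 'noconflict')
#   repo = hg.repository(myui, '/path/to/repo')
#   # checkout=None lets destutil pick a destination; brev=None activates
#   # no bookmark afterwards
#   hadconflicts = hg.updatetotally(myui, repo, None, None)
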
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

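# Illustrative sketch (not part of this module): merging in another head and
# abandoning the attempt if conflicts remain. 'otherhead' is hypothetical.
#
#   if hg.merge(repo, 'otherhead'):
#       # unresolved conflicts were left behind; back out of the merge
#       hg.merge(repo, None, abort=True)
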
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

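# Illustrative sketch (not part of this module): previewing incoming
# changesets from the default pull source. The opts keys mirror the flags of
# 'hg incoming'; only 'bundle' and 'force' are accessed unconditionally above.
#
#   opts = {'branch': [], 'rev': [], 'bundle': '', 'force': False,
#           'subrepos': False, 'newest_first': False, 'no_merges': False,
#           'limit': None}
#   # returns 0 if incoming changesets were found, 1 otherwise
#   ret = hg.incoming(myui, repo, 'default', opts)
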
def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

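# Illustrative sketch (not part of this module): the outgoing() counterpart
# to the incoming() example above, against the default push target.
#
#   opts = {'branch': [], 'rev': [], 'force': False, 'subrepos': False,
#           'newest_first': False, 'no_merges': False, 'limit': None}
#   ret = hg.outgoing(myui, repo, None, opts)  # dest=None -> default-push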
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

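# Illustrative sketch (not part of this module): running a consistency check
# programmatically; a nonzero return signals integrity problems.
#
#   from mercurial import ui as uimod, hg
#
#   repo = hg.repository(uimod.ui.load(), '/path/to/repo')
#   if hg.verify(repo):
#       repo.ui.warn('repository has integrity problems\n')
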
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

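# Illustrative sketch (not part of this module): deriving a ui for talking to
# a remote so that ssh and auth settings carry over. The ssh override shown
# is hypothetical.
#
#   remui = hg.remoteui(repo, {'ssh': 'ssh -C', 'remotecmd': ''})
#   assert remui.config('ui', 'ssh') == 'ssh -C'
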
# Files of interest
# Used to check if the repository has changed, by looking at the mtime and
# size of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed-in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
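
# Illustrative sketch (not part of this module): the caching pattern used by
# long-lived servers such as hgweb. fetch() hands back a fresh repo instance
# only when one of the files of interest above changed on disk.
#
#   cached = cachedlocalrepo(repository(myui, '/path/to/repo'))
#   while True:                      # hypothetical serving loop
#       repo, isnew = cached.fetch()
#       if isnew:
#           myui.debug('repository was refreshed\n')
#       ...                          # serve one request from 'repo'
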
@@ -1,2505 +1,2530 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

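# Illustrative sketch (mirrors, but is not, the real property definitions
# further down): how these filecache classes are used on a repository class.
# The cached value is recomputed only when the named file changes on disk.
#
#   class somerepo(object):
#       @repofilecache('bookmarks')
#       def _bookmarks(self):
#           # re-read .hg/bookmarks only when its mtime/size changes
#           return bookmarks.bmstore(self)
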
def isfilecached(repo, name):
    """check if a repo already has the "name" filecache-ed property cached

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

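# Illustrative sketch (not actual repo code): pinning a method to the
# unfiltered repository no matter which filtered view it is called through.
#
#   class somerepo(object):
#       @unfilteredmethod
#       def destroyed(self):
#           # 'self' here is always the unfiltered repo
#           pass
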
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

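# Illustrative sketch (not part of this module): the executor protocol that
# localcommandexecutor implements. For the local case the future is already
# resolved when callcommand() returns; remote executors may defer.
#
#   with peer.commandexecutor() as e:
#       f = e.callcommand('lookup', {'key': 'tip'})
#   node = f.result()
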
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

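# Illustrative sketch (not part of this module): obtaining a localpeer from a
# repository and speaking the modern peer API to it.
#
#   peer = repo.peer()                 # wraps repo.filtered('served')
#   caps = peer.capabilities()
#   heads = peer.heads()
#   assert peer.local() is not None    # local peers expose the repo directly
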
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

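# Illustrative sketch (extension-side, with a hypothetical requirement name):
# how an extension advertises support for a custom requirement.
#
#   def featuresetup(ui, supported):
#       # declare that this extension can open repos with this requirement
#       supported |= {'exp-myfeature'}
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
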
@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes of files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

429 def __init__(self, baseui, path, intents=None):
429 def __init__(self, baseui, path, intents=None):
430 """Create a new local repository instance.
430 """Create a new local repository instance.
431
431
432 Most callers should use ``hg.repository()`` or ``localrepo.instance()``
432 Most callers should use ``hg.repository()`` or ``localrepo.instance()``
433 for obtaining a new repository object.
433 for obtaining a new repository object.
434 """
434 """
435
435
436 self.requirements = set()
436 self.requirements = set()
437 self.filtername = None
437 self.filtername = None
438 # wvfs: rooted at the repository root, used to access the working copy
438 # wvfs: rooted at the repository root, used to access the working copy
439 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
439 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
440 # vfs: rooted at .hg, used to access repo files outside of .hg/store
440 # vfs: rooted at .hg, used to access repo files outside of .hg/store
441 self.vfs = None
441 self.vfs = None
442 # svfs: usually rooted at .hg/store, used to access repository history
442 # svfs: usually rooted at .hg/store, used to access repository history
443 # If this is a shared repository, this vfs may point to another
443 # If this is a shared repository, this vfs may point to another
444 # repository's .hg/store directory.
444 # repository's .hg/store directory.
445 self.svfs = None
445 self.svfs = None
446 self.root = self.wvfs.base
446 self.root = self.wvfs.base
447 self.path = self.wvfs.join(".hg")
447 self.path = self.wvfs.join(".hg")
448 self.origroot = path
448 self.origroot = path
449 self.baseui = baseui
449 self.baseui = baseui
450 self.ui = baseui.copy()
450 self.ui = baseui.copy()
451 self.ui.copy = baseui.copy # prevent copying repo configuration
451 self.ui.copy = baseui.copy # prevent copying repo configuration
452 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
452 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
453 if (self.ui.configbool('devel', 'all-warnings') or
453 if (self.ui.configbool('devel', 'all-warnings') or
454 self.ui.configbool('devel', 'check-locks')):
454 self.ui.configbool('devel', 'check-locks')):
455 self.vfs.audit = self._getvfsward(self.vfs.audit)
455 self.vfs.audit = self._getvfsward(self.vfs.audit)
456 # A list of callback to shape the phase if no data were found.
456 # A list of callback to shape the phase if no data were found.
457 # Callback are in the form: func(repo, roots) --> processed root.
457 # Callback are in the form: func(repo, roots) --> processed root.
458 # This list it to be filled by extension during repo setup
458 # This list it to be filled by extension during repo setup
459 self._phasedefaults = []
459 self._phasedefaults = []
460 try:
460 try:
461 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
461 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
462 self._loadextensions()
462 self._loadextensions()
463 except IOError:
463 except IOError:
464 pass
464 pass
465
465
466 if featuresetupfuncs:
466 if featuresetupfuncs:
467 self.supported = set(self._basesupported) # use private copy
467 self.supported = set(self._basesupported) # use private copy
468 extmods = set(m.__name__ for n, m
468 extmods = set(m.__name__ for n, m
469 in extensions.extensions(self.ui))
469 in extensions.extensions(self.ui))
470 for setupfunc in featuresetupfuncs:
470 for setupfunc in featuresetupfuncs:
471 if setupfunc.__module__ in extmods:
471 if setupfunc.__module__ in extmods:
472 setupfunc(self.ui, self.supported)
472 setupfunc(self.ui, self.supported)
473 else:
473 else:
474 self.supported = self._basesupported
474 self.supported = self._basesupported
475 color.setup(self.ui)
475 color.setup(self.ui)
476
476
477 # Add compression engines.
477 # Add compression engines.
478 for name in util.compengines:
478 for name in util.compengines:
479 engine = util.compengines[name]
479 engine = util.compengines[name]
480 if engine.revlogheader():
480 if engine.revlogheader():
481 self.supported.add('exp-compression-%s' % name)
481 self.supported.add('exp-compression-%s' % name)
482
482
483 if not self.vfs.isdir():
483 if not self.vfs.isdir():
484 try:
484 try:
485 self.vfs.stat()
485 self.vfs.stat()
486 except OSError as inst:
486 except OSError as inst:
487 if inst.errno != errno.ENOENT:
487 if inst.errno != errno.ENOENT:
488 raise
488 raise
489 raise error.RepoError(_("repository %s not found") % path)
489 raise error.RepoError(_("repository %s not found") % path)
490 else:
490 else:
491 try:
491 try:
492 self.requirements = scmutil.readrequires(
492 self.requirements = scmutil.readrequires(
493 self.vfs, self.supported)
493 self.vfs, self.supported)
494 except IOError as inst:
494 except IOError as inst:
495 if inst.errno != errno.ENOENT:
495 if inst.errno != errno.ENOENT:
496 raise
496 raise
497
497
498 cachepath = self.vfs.join('cache')
498 cachepath = self.vfs.join('cache')
499 self.sharedpath = self.path
499 self.sharedpath = self.path
500 try:
500 try:
501 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
501 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
502 if 'relshared' in self.requirements:
502 if 'relshared' in self.requirements:
503 sharedpath = self.vfs.join(sharedpath)
503 sharedpath = self.vfs.join(sharedpath)
504 vfs = vfsmod.vfs(sharedpath, realpath=True)
504 vfs = vfsmod.vfs(sharedpath, realpath=True)
505 cachepath = vfs.join('cache')
505 cachepath = vfs.join('cache')
506 s = vfs.base
506 s = vfs.base
507 if not vfs.exists():
507 if not vfs.exists():
508 raise error.RepoError(
508 raise error.RepoError(
509 _('.hg/sharedpath points to nonexistent directory %s') % s)
509 _('.hg/sharedpath points to nonexistent directory %s') % s)
510 self.sharedpath = s
510 self.sharedpath = s
511 except IOError as inst:
511 except IOError as inst:
512 if inst.errno != errno.ENOENT:
512 if inst.errno != errno.ENOENT:
513 raise
513 raise
514
514
515 if 'exp-sparse' in self.requirements and not sparse.enabled:
515 if 'exp-sparse' in self.requirements and not sparse.enabled:
516 raise error.RepoError(_('repository is using sparse feature but '
516 raise error.RepoError(_('repository is using sparse feature but '
517 'sparse is not enabled; enable the '
517 'sparse is not enabled; enable the '
518 '"sparse" extensions to access'))
518 '"sparse" extensions to access'))
519
519
520 self.store = store.store(
520 self.store = store.store(
521 self.requirements, self.sharedpath,
521 self.requirements, self.sharedpath,
522 lambda base: vfsmod.vfs(base, cacheaudited=True))
522 lambda base: vfsmod.vfs(base, cacheaudited=True))
523 self.spath = self.store.path
523 self.spath = self.store.path
524 self.svfs = self.store.vfs
524 self.svfs = self.store.vfs
525 self.sjoin = self.store.join
525 self.sjoin = self.store.join
526 self.vfs.createmode = self.store.createmode
526 self.vfs.createmode = self.store.createmode
527 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
527 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
528 self.cachevfs.createmode = self.store.createmode
528 self.cachevfs.createmode = self.store.createmode
529 if (self.ui.configbool('devel', 'all-warnings') or
529 if (self.ui.configbool('devel', 'all-warnings') or
530 self.ui.configbool('devel', 'check-locks')):
530 self.ui.configbool('devel', 'check-locks')):
531 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
531 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
532 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
532 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
533 else: # standard vfs
533 else: # standard vfs
534 self.svfs.audit = self._getsvfsward(self.svfs.audit)
534 self.svfs.audit = self._getsvfsward(self.svfs.audit)
535 self._applyopenerreqs()
535 self._applyopenerreqs()
536
536
537 self._dirstatevalidatewarned = False
537 self._dirstatevalidatewarned = False
538
538
539 self._branchcaches = {}
539 self._branchcaches = {}
540 self._revbranchcache = None
540 self._revbranchcache = None
541 self._filterpats = {}
541 self._filterpats = {}
542 self._datafilters = {}
542 self._datafilters = {}
543 self._transref = self._lockref = self._wlockref = None
543 self._transref = self._lockref = self._wlockref = None
544
544
        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
            'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            self.svfs.options['generaldelta'] = True
        maxchainlen = None
        if sparserevlog:
            maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
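
    # Illustrative sketch (not part of the original source): view names such
    # as 'visible' or 'served' come from repoview's filter table, e.g.:
    #
    #   visible = repo.filtered('visible')    # hides filtered changesets
    #   everything = repo.unfiltered()        # raw, unfiltered repository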

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch
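
    # Illustrative sketch (not part of the original source): the returned
    # matcher is callable on repo-root-relative paths, e.g.:
    #
    #   m = repo.narrowmatch()
    #   if m('src/module/file.py'):
    #       pass  # the path is covered by the narrowspec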

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
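
    # Illustrative sketch (not part of the original source): the supported
    # changeid shapes include, e.g.:
    #
    #   repo[None]    # working directory context
    #   repo[0]       # changectx for revision 0
    #   repo[0:3]     # list of changectxs, skipping filtered revisions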

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
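
    # Illustrative sketch (not part of the original source): formatspec
    # placeholders such as %d (int) and %ld (list of ints) keep caller data
    # out of the revset grammar, e.g.:
    #
    #   for rev in repo.revs('%ld and not public()', [10, 11, 12]):
    #       pass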

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
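
    # Illustrative sketch (not part of the original source; assumes a ui
    # object is at hand):
    #
    #   for ctx in repo.set('draft() and user(%s)', 'alice'):
    #       ui.write('%s %s\n' % (ctx.hex(), ctx.description()))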

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
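
    # Illustrative sketch (not part of the original source; 'releases' is a
    # hypothetical alias name): expand user aliases while overriding one of
    # them locally:
    #
    #   revs = repo.anyrevs(['releases', 'tip'], user=True,
    #                       localalias={'releases': 'tag()'})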

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
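
    # Illustrative sketch (not part of the original source; 'myext-ping' is a
    # hypothetical hook name): keyword arguments are exposed to shell hooks
    # as HG_* environment variables, e.g.:
    #
    #   repo.hook('myext-ping', throw=False, source='example')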

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
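
    # Illustrative sketch (not part of the original source; assumes ``hex``
    # from mercurial.node and a ui object): tags() maps tag names to binary
    # nodes, so display code typically hexlifies the values, e.g.:
    #
    #   for name, n in sorted(repo.tags().iteritems()):
    #       ui.write('%s %s\n' % (hex(n), name))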

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
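
    # Illustrative sketch (not part of the original source): the returned
    # branch cache can be used like a mapping from branch name to a list of
    # head nodes, e.g.:
    #
    #   heads = repo.branchmap()['default']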

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
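
    # Illustrative sketch (not part of the original source):
    #
    #   tip = repo.branchtip('stable', ignoremissing=True)
    #   if tip is None:
    #       pass  # no such branch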

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, since it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
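
    # Illustrative sketch (not part of the original source): the filter
    # patterns above come from hgrc sections of the same name, e.g.:
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip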

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
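
    # Illustrative sketch (not part of the original source): ``flags``
    # follows the manifest convention, '' for a regular file, 'x' for an
    # executable and 'l' for a symlink, e.g.:
    #
    #   repo.wwrite('hello.txt', 'hello\n', '')
    #   repo.wwrite('run.sh', '#!/bin/sh\n', 'x')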

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes with
        # performance impacts. The current code runs more often than needed and
        # does not use caches as much as it could. The current focus is on the
        # behavior of the feature so we disable it by default. The flag will be
        # removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be invoked explicitly here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)
1382
1382
1383 tr = transaction.transaction(rp, self.svfs, vfsmap,
1383 tr = transaction.transaction(rp, self.svfs, vfsmap,
1384 "journal",
1384 "journal",
1385 "undo",
1385 "undo",
1386 aftertrans(renames),
1386 aftertrans(renames),
1387 self.store.createmode,
1387 self.store.createmode,
1388 validator=validate,
1388 validator=validate,
1389 releasefn=releasefn,
1389 releasefn=releasefn,
1390 checkambigfiles=_cachedfiles,
1390 checkambigfiles=_cachedfiles,
1391 name=desc)
1391 name=desc)
1392 tr.changes['origrepolen'] = len(self)
1392 tr.changes['origrepolen'] = len(self)
1393 tr.changes['obsmarkers'] = set()
1393 tr.changes['obsmarkers'] = set()
1394 tr.changes['phases'] = {}
1394 tr.changes['phases'] = {}
1395 tr.changes['bookmarks'] = {}
1395 tr.changes['bookmarks'] = {}
1396
1396
1397 tr.hookargs['txnid'] = txnid
1397 tr.hookargs['txnid'] = txnid
1398 # note: writing the fncache only during finalize mean that the file is
1398 # note: writing the fncache only during finalize mean that the file is
1399 # outdated when running hooks. As fncache is used for streaming clone,
1399 # outdated when running hooks. As fncache is used for streaming clone,
1400 # this is not expected to break anything that happen during the hooks.
1400 # this is not expected to break anything that happen during the hooks.
1401 tr.addfinalize('flush-fncache', self.store.write)
1401 tr.addfinalize('flush-fncache', self.store.write)
1402 def txnclosehook(tr2):
1402 def txnclosehook(tr2):
1403 """To be run if transaction is successful, will schedule a hook run
1403 """To be run if transaction is successful, will schedule a hook run
1404 """
1404 """
1405 # Don't reference tr2 in hook() so we don't hold a reference.
1405 # Don't reference tr2 in hook() so we don't hold a reference.
1406 # This reduces memory consumption when there are multiple
1406 # This reduces memory consumption when there are multiple
1407 # transactions per lock. This can likely go away if issue5045
1407 # transactions per lock. This can likely go away if issue5045
1408 # fixes the function accumulation.
1408 # fixes the function accumulation.
1409 hookargs = tr2.hookargs
1409 hookargs = tr2.hookargs
1410
1410
1411 def hookfunc():
1411 def hookfunc():
1412 repo = reporef()
1412 repo = reporef()
1413 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1413 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1414 bmchanges = sorted(tr.changes['bookmarks'].items())
1414 bmchanges = sorted(tr.changes['bookmarks'].items())
1415 for name, (old, new) in bmchanges:
1415 for name, (old, new) in bmchanges:
1416 args = tr.hookargs.copy()
1416 args = tr.hookargs.copy()
1417 args.update(bookmarks.preparehookargs(name, old, new))
1417 args.update(bookmarks.preparehookargs(name, old, new))
1418 repo.hook('txnclose-bookmark', throw=False,
1418 repo.hook('txnclose-bookmark', throw=False,
1419 txnname=desc, **pycompat.strkwargs(args))
1419 txnname=desc, **pycompat.strkwargs(args))
1420
1420
1421 if hook.hashook(repo.ui, 'txnclose-phase'):
1421 if hook.hashook(repo.ui, 'txnclose-phase'):
1422 cl = repo.unfiltered().changelog
1422 cl = repo.unfiltered().changelog
1423 phasemv = sorted(tr.changes['phases'].items())
1423 phasemv = sorted(tr.changes['phases'].items())
1424 for rev, (old, new) in phasemv:
1424 for rev, (old, new) in phasemv:
1425 args = tr.hookargs.copy()
1425 args = tr.hookargs.copy()
1426 node = hex(cl.node(rev))
1426 node = hex(cl.node(rev))
1427 args.update(phases.preparehookargs(node, old, new))
1427 args.update(phases.preparehookargs(node, old, new))
1428 repo.hook('txnclose-phase', throw=False, txnname=desc,
1428 repo.hook('txnclose-phase', throw=False, txnname=desc,
1429 **pycompat.strkwargs(args))
1429 **pycompat.strkwargs(args))
1430
1430
1431 repo.hook('txnclose', throw=False, txnname=desc,
1431 repo.hook('txnclose', throw=False, txnname=desc,
1432 **pycompat.strkwargs(hookargs))
1432 **pycompat.strkwargs(hookargs))
1433 reporef()._afterlock(hookfunc)
1433 reporef()._afterlock(hookfunc)
1434 tr.addfinalize('txnclose-hook', txnclosehook)
1434 tr.addfinalize('txnclose-hook', txnclosehook)
1435 # Include a leading "-" to make it happen before the transaction summary
1435 # Include a leading "-" to make it happen before the transaction summary
1436 # reports registered via scmutil.registersummarycallback() whose names
1436 # reports registered via scmutil.registersummarycallback() whose names
1437 # are 00-txnreport etc. That way, the caches will be warm when the
1437 # are 00-txnreport etc. That way, the caches will be warm when the
1438 # callbacks run.
1438 # callbacks run.
1439 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1439 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1440 def txnaborthook(tr2):
1440 def txnaborthook(tr2):
1441 """To be run if transaction is aborted
1441 """To be run if transaction is aborted
1442 """
1442 """
1443 reporef().hook('txnabort', throw=False, txnname=desc,
1443 reporef().hook('txnabort', throw=False, txnname=desc,
1444 **pycompat.strkwargs(tr2.hookargs))
1444 **pycompat.strkwargs(tr2.hookargs))
1445 tr.addabort('txnabort-hook', txnaborthook)
1445 tr.addabort('txnabort-hook', txnaborthook)
1446 # avoid eager cache invalidation. in-memory data should be identical
1446 # avoid eager cache invalidation. in-memory data should be identical
1447 # to stored data if transaction has no error.
1447 # to stored data if transaction has no error.
1448 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1448 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1449 self._transref = weakref.ref(tr)
1449 self._transref = weakref.ref(tr)
1450 scmutil.registersummarycallback(self, tr, desc)
1450 scmutil.registersummarycallback(self, tr, desc)
1451 return tr
1451 return tr
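
    # Illustration (sketch, not part of the original module): the wiring
    # above makes bookmark moves observable before the transaction commits.
    # A hypothetical hgrc entry such as:
    #
    #     [hooks]
    #     pretxnclose-bookmark.audit = python:myhooks.checkbookmark
    #
    # would run 'checkbookmark' (a made-up function) with the arguments
    # prepared by bookmarks.preparehookargs(); since the hook is invoked
    # with throw=True, raising an error there aborts the transaction.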

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
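
    # Illustration (sketch): on successful transaction close, the journal.*
    # backups listed above are renamed to undo.* files, which is the mapping
    # undofiles() computes via undoname(), roughly:
    #
    #     undoname('journal.dirstate')  # -> 'undo.dirstate'
    #
    # 'hg rollback' later restores state from those undo.* files.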

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
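
    # Illustration (hypothetical caller): this method backs 'hg recover';
    # the boolean result tells a caller whether an interrupted transaction
    # was actually rolled back:
    #
    #     if repo.recover():
    #         repo.ui.status('recovered interrupted transaction\n')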

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
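
    # Illustration (minimal sketch of the pattern used above): holding only a
    # weak reference in the closure breaks the repo -> transaction ->
    # callback -> repo cycle:
    #
    #     import weakref
    #     reporef = weakref.ref(repo)   # no strong reference kept
    #     def updater(tr):
    #         r = reporef()             # None once the repo is collected
    #         if r is not None:
    #             r.updatecaches(tr)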

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a later call to
            # `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough
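
    # Illustration (sketch): a maintenance caller that wants every cache,
    # including the lazy ones, rebuilt would do something like:
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)
    #
    # which is roughly what the 'debugupdatecaches' debug command does.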

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
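
    # Illustration (sketch): _afterlock defers work until every lock is
    # released, or runs it immediately when nothing is locked:
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #     repo._afterlock(notify)
    #
    # the commit hook further down uses exactly this mechanism so that
    # 'commit' hooks fire outside the locked section.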

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock; it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
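
    # Illustration (sketch): per the docstrings above, a caller needing both
    # locks takes them in wlock -> lock order, e.g.:
    #
    #     with repo.wlock():       # working-copy lock first ...
    #         with repo.lock():    # ... then the store lock
    #             with repo.transaction('example'):
    #                 pass         # mutate store and working copy here
    #
    # the reverse order triggers the develwarn above and risks deadlock
    # against other processes.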

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
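
    # Illustration (sketch of the resulting filelog entry): when copy
    # information is recorded above, the new filelog revision carries
    # metadata roughly like
    #
    #     meta = {"copy": "foo", "copyrev": "<40-digit hex filelog node>"}
    #
    # and gets nullid as its first parent, signalling readers to "look up
    # the copy data" instead of following a normal parent link.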

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
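
    # Illustration (hypothetical caller): the simplest programmatic use
    # mirrors 'hg commit -m ...':
    #
    #     node = repo.commit(text='fix the frobnicator',
    #                        user='alice <alice@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')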

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop)
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # put the new commit in the proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
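
    # Illustration (hypothetical, simplified): commitctx() also serves
    # callers that build commits without a working copy, e.g. via an
    # in-memory context:
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(repo, memctx, path, 'new contents\n')
    #
    #     mctx = context.memctx(repo, (repo['.'].node(), None),
    #                           'synthetic commit', ['a.txt'], getfilectx,
    #                           user='bot <bot@example.com>')
    #     node = repo.commitctx(mctx)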

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
2198
2198
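    # Illustrative sketch (not in the original source): history-destroying
    # operations are expected to bracket their work with the two methods
    # above, roughly like this (locking details and the actual truncation
    # elided):
    #
    #     with repo.lock():
    #         repo.destroying()
    #         # ... truncate revlogs / remove the stripped nodes ...
    #         repo.destroyed()
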
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to it each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

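    # Illustrative sketch (not in the original source): an extension could
    # register a post-dirstate-status callback like this ('logclean' is a
    # hypothetical name):
    #
    #     def logclean(wctx, status):
    #         # runs under wlock, at the point where status fixups happen
    #         wctx.repo().ui.debug('%d clean files\n' % len(status.clean))
    #
    #     repo.addpostdsstatus(logclean)
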
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

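    # Illustrative usage (not in the original source): list the open heads
    # of the current (dirstate) branch, newest first:
    #
    #     for h in repo.branchheads():
    #         repo.ui.write('%s\n' % repo[h].hex())
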
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

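    # Illustrative note (not in the original source): for each (top, bottom)
    # pair, between() walks first parents down from top and samples a node
    # when the distance walked hits 1, 2, 4, 8, ... steps, so the result has
    # O(log n) entries. For a linear history with top at rev 8 and bottom at
    # rev 0, the sampled revisions would be 7, 6 and 4 (the walk stops when
    # it reaches bottom, which is itself excluded).
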
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions that are called with a
        pushop (carrying repo, remote and outgoing) before changesets are
        pushed.
        """
        return util.hooks()

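    # Illustrative sketch (not in the original source): an extension can
    # inspect or veto outgoing changesets by registering a hook here
    # ('checkcount' is a hypothetical name):
    #
    #     def checkcount(pushop):
    #         if len(pushop.outgoing.missing) > 100:
    #             raise error.Abort('refusing to push more than 100 changesets')
    #
    #     repo.prepushoutgoinghooks.add('myext', checkcount)
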
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

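    # Illustrative usage (not in the original source): bookmarks travel over
    # the 'bookmarks' pushkey namespace. Moving one by hand could look like
    # this ('mybook' and newnode are hypothetical; '' as the old value means
    # the bookmark does not exist yet):
    #
    #     from mercurial import node
    #     repo.pushkey('bookmarks', 'mybook', '', node.hex(newnode))
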
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return localrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

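# Illustrative sketch (not in the original source): an extension that needs
# its own on-disk format marker can wrap newreporequirements(); the
# requirement name 'exp-myfeature' is hypothetical:
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts=None):
#         reqs = orig(ui, createopts=createopts)
#         reqs.add('exp-myfeature')
#         return reqs
#
#     extensions.wrapfunction(localrepo, 'newreporequirements',
#                             _newreporequirements)
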
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}

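# Illustrative sketch (not in the original source): an extension that knows
# how to handle a custom creation option would filter it out here ('myopt'
# is hypothetical):
#
#     def _filterknowncreateopts(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop('myopt', None)
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                             _filterknowncreateopts)
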
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

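# Illustrative usage (not in the original source): creating and then opening
# a repository programmatically might look like this (path is hypothetical):
#
#     from mercurial import ui as uimod
#
#     myui = uimod.ui.load()
#     createrepository(myui, b'/tmp/newrepo')
#     repo = instance(myui, b'/tmp/newrepo', create=False)
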
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
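
# Illustrative note (not in the original source): once an instance has been
# poisoned after an unshare, only close() stays callable; every other
# attribute access fails loudly instead of operating on stale state:
#
#     poisonrepository(repo)
#     repo.close()       # still allowed
#     repo.changelog     # raises error.ProgrammingError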