merge: use public interface ms.localctx instead of ms._local
Pulkit Goyal
r35731:7ffbd911 (default branch)
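The changeset swaps direct access to the merge state's private _local attribute for the public localctx accessor. As a rough illustration only (a minimal sketch under assumed internals, not the actual Mercurial mergestate class), the accessor can be thought of as wrapping the stored binary node in a changectx, so callers use context methods such as hex() instead of reaching into the private field:

    # Minimal sketch, not the real mergestate implementation: the names _local
    # and localctx mirror the changeset below; everything else is assumed.
    class mergestate(object):
        def __init__(self, repo, localnode):
            self._repo = repo           # repository this merge state belongs to
            self._local = localnode     # private: binary node of the local side

        @property
        def localctx(self):
            # public accessor: wrap the private node in a changectx so callers
            # can use context methods (e.g. .hex()) instead of touching _local
            if self._local is None:
                raise RuntimeError("localctx accessed but self._local isn't set")
            return self._repo[self._local]

With an accessor like this in place, the abort path in merge() no longer needs the hex() helper from mercurial.node, which is why the first hunk below also drops that import.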
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -15,7 +15,6 @@
 
 from .i18n import _
 from .node import (
-    hex,
     nullid,
 )
 
@@ -858,7 +857,7 @@
         ms = mergemod.mergestate.read(repo)
         if ms.active():
             # there were conflicts
-            node = hex(ms._local)
+            node = ms.localctx.hex()
         else:
             # there were no conficts, mergestate was not stored
             node = repo['.'].hex()
865
864
866 repo.ui.status(_("aborting the merge, updating back to"
865 repo.ui.status(_("aborting the merge, updating back to"
867 " %s\n") % node[:12])
866 " %s\n") % node[:12])
868 stats = mergemod.update(repo, node, branchmerge=False, force=True,
867 stats = mergemod.update(repo, node, branchmerge=False, force=True,
869 labels=labels)
868 labels=labels)
870
869
871 _showstats(repo, stats)
870 _showstats(repo, stats)
872 if stats[3]:
871 if stats[3]:
873 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
872 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
874 "or 'hg merge --abort' to abandon\n"))
873 "or 'hg merge --abort' to abandon\n"))
875 elif remind and not abort:
874 elif remind and not abort:
876 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
875 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
877 return stats[3] > 0
876 return stats[3] > 0
878
877
def _incoming(displaychlist, subreporecurse, ui, repo, source,
        opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

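# For illustration only (hypothetical URLs): the path lookup above prefers
# 'default-push' over 'default', so with an hgrc like the following an empty
# dest resolves to the ssh path when computing outgoing changes:
#
#   [paths]
#   default = https://example.com/repo
#   default-push = ssh://hg@example.com/repo
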
def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

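# Illustrative sketch (hypothetical helper, minimal opts): both incoming()
# and outgoing() return 0 when changes were found and 1 otherwise, so the
# result can be returned directly as a command's exit code. A real command
# passes the full option dict; missing keys are read with opts.get() in the
# functions above, so None defaults are acceptable in this sketch.
def _example_has_outgoing(ui, repo, dest=None):
    opts = {'force': False}
    return outgoing(ui, repo, dest, opts) == 0
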
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

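# Illustrative sketch (hypothetical helper): verify() folds the subrepo check
# into the result of verifymod.verify(), so a falsy return value means no
# problems were reported and the value can serve as an exit status.
def _example_verify_exitcode(repo):
    return 1 if verify(repo) else 0
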
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

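# Illustrative sketch (hypothetical helper; the option values are examples):
# the opts dict mirrors command-line flags such as --ssh/--remotecmd, and, as
# the loop above shows, those take precedence over the source [ui] section.
def _example_remote_ui(repo):
    opts = {'ssh': 'ssh -C', 'remotecmd': 'hg'}
    return remoteui(repo, opts)
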
# Files of interest
# Used to check if the repository has changed, by looking at the mtime and
# size of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
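
# Illustrative sketch of the intended caching pattern (hypothetical helper,
# hgweb-style long-lived process): keep one cachedlocalrepo per repository and
# call fetch() per request; it transparently swaps in a fresh instance when
# any of the files listed in foi has changed on disk.
def _example_serve_request(cache):
    repo, refreshed = cache.fetch()
    if refreshed:
        repo.ui.debug('repository changed on disk; reloaded\n')
    return repo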