##// END OF EJS Templates
share: handle --relative shares to a different drive letter gracefully...
Matt Harbison -
r34980:b64ea7fb stable
parent child Browse files
Show More
@@ -1,1101 +1,1103 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 bundlerepo,
21 bundlerepo,
22 cmdutil,
22 cmdutil,
23 destutil,
23 destutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 httppeer,
28 httppeer,
29 localrepo,
29 localrepo,
30 lock,
30 lock,
31 merge as mergemod,
31 merge as mergemod,
32 node,
32 node,
33 phases,
33 phases,
34 repoview,
34 repoview,
35 scmutil,
35 scmutil,
36 sshpeer,
36 sshpeer,
37 statichttprepo,
37 statichttprepo,
38 ui as uimod,
38 ui as uimod,
39 unionrepo,
39 unionrepo,
40 url,
40 url,
41 util,
41 util,
42 verify as verifymod,
42 verify as verifymod,
43 vfs as vfsmod,
43 vfs as vfsmod,
44 )
44 )
45
45
46 release = lock.release
46 release = lock.release
47
47
48 # shared features
48 # shared features
49 sharedbookmarks = 'bookmarks'
49 sharedbookmarks = 'bookmarks'
50
50
51 def _local(path):
51 def _local(path):
52 path = util.expandpath(util.urllocalpath(path))
52 path = util.expandpath(util.urllocalpath(path))
53 return (os.path.isfile(path) and bundlerepo or localrepo)
53 return (os.path.isfile(path) and bundlerepo or localrepo)
54
54
55 def addbranchrevs(lrepo, other, branches, revs):
55 def addbranchrevs(lrepo, other, branches, revs):
56 peer = other.peer() # a courtesy to callers using a localrepo for other
56 peer = other.peer() # a courtesy to callers using a localrepo for other
57 hashbranch, branches = branches
57 hashbranch, branches = branches
58 if not hashbranch and not branches:
58 if not hashbranch and not branches:
59 x = revs or None
59 x = revs or None
60 if util.safehasattr(revs, 'first'):
60 if util.safehasattr(revs, 'first'):
61 y = revs.first()
61 y = revs.first()
62 elif revs:
62 elif revs:
63 y = revs[0]
63 y = revs[0]
64 else:
64 else:
65 y = None
65 y = None
66 return x, y
66 return x, y
67 if revs:
67 if revs:
68 revs = list(revs)
68 revs = list(revs)
69 else:
69 else:
70 revs = []
70 revs = []
71
71
72 if not peer.capable('branchmap'):
72 if not peer.capable('branchmap'):
73 if branches:
73 if branches:
74 raise error.Abort(_("remote branch lookup not supported"))
74 raise error.Abort(_("remote branch lookup not supported"))
75 revs.append(hashbranch)
75 revs.append(hashbranch)
76 return revs, revs[0]
76 return revs, revs[0]
77 branchmap = peer.branchmap()
77 branchmap = peer.branchmap()
78
78
79 def primary(branch):
79 def primary(branch):
80 if branch == '.':
80 if branch == '.':
81 if not lrepo:
81 if not lrepo:
82 raise error.Abort(_("dirstate branch not accessible"))
82 raise error.Abort(_("dirstate branch not accessible"))
83 branch = lrepo.dirstate.branch()
83 branch = lrepo.dirstate.branch()
84 if branch in branchmap:
84 if branch in branchmap:
85 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
85 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
86 return True
86 return True
87 else:
87 else:
88 return False
88 return False
89
89
90 for branch in branches:
90 for branch in branches:
91 if not primary(branch):
91 if not primary(branch):
92 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
92 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
93 if hashbranch:
93 if hashbranch:
94 if not primary(hashbranch):
94 if not primary(hashbranch):
95 revs.append(hashbranch)
95 revs.append(hashbranch)
96 return revs, revs[0]
96 return revs, revs[0]
97
97
98 def parseurl(path, branches=None):
98 def parseurl(path, branches=None):
99 '''parse url#branch, returning (url, (branch, branches))'''
99 '''parse url#branch, returning (url, (branch, branches))'''
100
100
101 u = util.url(path)
101 u = util.url(path)
102 branch = None
102 branch = None
103 if u.fragment:
103 if u.fragment:
104 branch = u.fragment
104 branch = u.fragment
105 u.fragment = None
105 u.fragment = None
106 return bytes(u), (branch, branches or [])
106 return bytes(u), (branch, branches or [])
107
107
108 schemes = {
108 schemes = {
109 'bundle': bundlerepo,
109 'bundle': bundlerepo,
110 'union': unionrepo,
110 'union': unionrepo,
111 'file': _local,
111 'file': _local,
112 'http': httppeer,
112 'http': httppeer,
113 'https': httppeer,
113 'https': httppeer,
114 'ssh': sshpeer,
114 'ssh': sshpeer,
115 'static-http': statichttprepo,
115 'static-http': statichttprepo,
116 }
116 }
117
117
118 def _peerlookup(path):
118 def _peerlookup(path):
119 u = util.url(path)
119 u = util.url(path)
120 scheme = u.scheme or 'file'
120 scheme = u.scheme or 'file'
121 thing = schemes.get(scheme) or schemes['file']
121 thing = schemes.get(scheme) or schemes['file']
122 try:
122 try:
123 return thing(path)
123 return thing(path)
124 except TypeError:
124 except TypeError:
125 # we can't test callable(thing) because 'thing' can be an unloaded
125 # we can't test callable(thing) because 'thing' can be an unloaded
126 # module that implements __call__
126 # module that implements __call__
127 if not util.safehasattr(thing, 'instance'):
127 if not util.safehasattr(thing, 'instance'):
128 raise
128 raise
129 return thing
129 return thing
130
130
131 def islocal(repo):
131 def islocal(repo):
132 '''return true if repo (or path pointing to repo) is local'''
132 '''return true if repo (or path pointing to repo) is local'''
133 if isinstance(repo, bytes):
133 if isinstance(repo, bytes):
134 try:
134 try:
135 return _peerlookup(repo).islocal(repo)
135 return _peerlookup(repo).islocal(repo)
136 except AttributeError:
136 except AttributeError:
137 return False
137 return False
138 return repo.local()
138 return repo.local()
139
139
140 def openpath(ui, path):
140 def openpath(ui, path):
141 '''open path with open if local, url.open if remote'''
141 '''open path with open if local, url.open if remote'''
142 pathurl = util.url(path, parsequery=False, parsefragment=False)
142 pathurl = util.url(path, parsequery=False, parsefragment=False)
143 if pathurl.islocal():
143 if pathurl.islocal():
144 return util.posixfile(pathurl.localpath(), 'rb')
144 return util.posixfile(pathurl.localpath(), 'rb')
145 else:
145 else:
146 return url.open(ui, path)
146 return url.open(ui, path)
147
147
148 # a list of (ui, repo) functions called for wire peer initialization
148 # a list of (ui, repo) functions called for wire peer initialization
149 wirepeersetupfuncs = []
149 wirepeersetupfuncs = []
150
150
151 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
151 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
152 """return a repository object for the specified path"""
152 """return a repository object for the specified path"""
153 obj = _peerlookup(path).instance(ui, path, create)
153 obj = _peerlookup(path).instance(ui, path, create)
154 ui = getattr(obj, "ui", ui)
154 ui = getattr(obj, "ui", ui)
155 for f in presetupfuncs or []:
155 for f in presetupfuncs or []:
156 f(ui, obj)
156 f(ui, obj)
157 for name, module in extensions.extensions(ui):
157 for name, module in extensions.extensions(ui):
158 hook = getattr(module, 'reposetup', None)
158 hook = getattr(module, 'reposetup', None)
159 if hook:
159 if hook:
160 hook(ui, obj)
160 hook(ui, obj)
161 if not obj.local():
161 if not obj.local():
162 for f in wirepeersetupfuncs:
162 for f in wirepeersetupfuncs:
163 f(ui, obj)
163 f(ui, obj)
164 return obj
164 return obj
165
165
166 def repository(ui, path='', create=False, presetupfuncs=None):
166 def repository(ui, path='', create=False, presetupfuncs=None):
167 """return a repository object for the specified path"""
167 """return a repository object for the specified path"""
168 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
168 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
169 repo = peer.local()
169 repo = peer.local()
170 if not repo:
170 if not repo:
171 raise error.Abort(_("repository '%s' is not local") %
171 raise error.Abort(_("repository '%s' is not local") %
172 (path or peer.url()))
172 (path or peer.url()))
173 return repo.filtered('visible')
173 return repo.filtered('visible')
174
174
175 def peer(uiorrepo, opts, path, create=False):
175 def peer(uiorrepo, opts, path, create=False):
176 '''return a repository peer for the specified path'''
176 '''return a repository peer for the specified path'''
177 rui = remoteui(uiorrepo, opts)
177 rui = remoteui(uiorrepo, opts)
178 return _peerorrepo(rui, path, create).peer()
178 return _peerorrepo(rui, path, create).peer()
179
179
180 def defaultdest(source):
180 def defaultdest(source):
181 '''return default destination of clone if none is given
181 '''return default destination of clone if none is given
182
182
183 >>> defaultdest(b'foo')
183 >>> defaultdest(b'foo')
184 'foo'
184 'foo'
185 >>> defaultdest(b'/foo/bar')
185 >>> defaultdest(b'/foo/bar')
186 'bar'
186 'bar'
187 >>> defaultdest(b'/')
187 >>> defaultdest(b'/')
188 ''
188 ''
189 >>> defaultdest(b'')
189 >>> defaultdest(b'')
190 ''
190 ''
191 >>> defaultdest(b'http://example.org/')
191 >>> defaultdest(b'http://example.org/')
192 ''
192 ''
193 >>> defaultdest(b'http://example.org/foo/')
193 >>> defaultdest(b'http://example.org/foo/')
194 'foo'
194 'foo'
195 '''
195 '''
196 path = util.url(source).path
196 path = util.url(source).path
197 if not path:
197 if not path:
198 return ''
198 return ''
199 return os.path.basename(os.path.normpath(path))
199 return os.path.basename(os.path.normpath(path))
200
200
201 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
201 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
202 relative=False):
202 relative=False):
203 '''create a shared repository'''
203 '''create a shared repository'''
204
204
205 if not islocal(source):
205 if not islocal(source):
206 raise error.Abort(_('can only share local repositories'))
206 raise error.Abort(_('can only share local repositories'))
207
207
208 if not dest:
208 if not dest:
209 dest = defaultdest(source)
209 dest = defaultdest(source)
210 else:
210 else:
211 dest = ui.expandpath(dest)
211 dest = ui.expandpath(dest)
212
212
213 if isinstance(source, str):
213 if isinstance(source, str):
214 origsource = ui.expandpath(source)
214 origsource = ui.expandpath(source)
215 source, branches = parseurl(origsource)
215 source, branches = parseurl(origsource)
216 srcrepo = repository(ui, source)
216 srcrepo = repository(ui, source)
217 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
217 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
218 else:
218 else:
219 srcrepo = source.local()
219 srcrepo = source.local()
220 origsource = source = srcrepo.url()
220 origsource = source = srcrepo.url()
221 checkout = None
221 checkout = None
222
222
223 sharedpath = srcrepo.sharedpath # if our source is already sharing
223 sharedpath = srcrepo.sharedpath # if our source is already sharing
224
224
225 destwvfs = vfsmod.vfs(dest, realpath=True)
225 destwvfs = vfsmod.vfs(dest, realpath=True)
226 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
226 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
227
227
228 if destvfs.lexists():
228 if destvfs.lexists():
229 raise error.Abort(_('destination already exists'))
229 raise error.Abort(_('destination already exists'))
230
230
231 if not destwvfs.isdir():
231 if not destwvfs.isdir():
232 destwvfs.mkdir()
232 destwvfs.mkdir()
233 destvfs.makedir()
233 destvfs.makedir()
234
234
235 requirements = ''
235 requirements = ''
236 try:
236 try:
237 requirements = srcrepo.vfs.read('requires')
237 requirements = srcrepo.vfs.read('requires')
238 except IOError as inst:
238 except IOError as inst:
239 if inst.errno != errno.ENOENT:
239 if inst.errno != errno.ENOENT:
240 raise
240 raise
241
241
242 if relative:
242 if relative:
243 try:
243 try:
244 sharedpath = os.path.relpath(sharedpath, destvfs.base)
244 sharedpath = os.path.relpath(sharedpath, destvfs.base)
245 requirements += 'relshared\n'
245 requirements += 'relshared\n'
246 except IOError as e:
246 except (IOError, ValueError) as e:
247 # ValueError is raised on Windows if the drive letters differ on
248 # each path
247 raise error.Abort(_('cannot calculate relative path'),
249 raise error.Abort(_('cannot calculate relative path'),
248 hint=str(e))
250 hint=str(e))
249 else:
251 else:
250 requirements += 'shared\n'
252 requirements += 'shared\n'
251
253
252 destvfs.write('requires', requirements)
254 destvfs.write('requires', requirements)
253 destvfs.write('sharedpath', sharedpath)
255 destvfs.write('sharedpath', sharedpath)
254
256
255 r = repository(ui, destwvfs.base)
257 r = repository(ui, destwvfs.base)
256 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
258 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
257 _postshareupdate(r, update, checkout=checkout)
259 _postshareupdate(r, update, checkout=checkout)
258 return r
260 return r
259
261
260 def unshare(ui, repo):
262 def unshare(ui, repo):
261 """convert a shared repository to a normal one
263 """convert a shared repository to a normal one
262
264
263 Copy the store data to the repo and remove the sharedpath data.
265 Copy the store data to the repo and remove the sharedpath data.
264 """
266 """
265
267
266 destlock = lock = None
268 destlock = lock = None
267 lock = repo.lock()
269 lock = repo.lock()
268 try:
270 try:
269 # we use locks here because if we race with commit, we
271 # we use locks here because if we race with commit, we
270 # can end up with extra data in the cloned revlogs that's
272 # can end up with extra data in the cloned revlogs that's
271 # not pointed to by changesets, thus causing verify to
273 # not pointed to by changesets, thus causing verify to
272 # fail
274 # fail
273
275
274 destlock = copystore(ui, repo, repo.path)
276 destlock = copystore(ui, repo, repo.path)
275
277
276 sharefile = repo.vfs.join('sharedpath')
278 sharefile = repo.vfs.join('sharedpath')
277 util.rename(sharefile, sharefile + '.old')
279 util.rename(sharefile, sharefile + '.old')
278
280
279 repo.requirements.discard('shared')
281 repo.requirements.discard('shared')
280 repo.requirements.discard('relshared')
282 repo.requirements.discard('relshared')
281 repo._writerequirements()
283 repo._writerequirements()
282 finally:
284 finally:
283 destlock and destlock.release()
285 destlock and destlock.release()
284 lock and lock.release()
286 lock and lock.release()
285
287
286 # update store, spath, svfs and sjoin of repo
288 # update store, spath, svfs and sjoin of repo
287 repo.unfiltered().__init__(repo.baseui, repo.root)
289 repo.unfiltered().__init__(repo.baseui, repo.root)
288
290
289 # TODO: figure out how to access subrepos that exist, but were previously
291 # TODO: figure out how to access subrepos that exist, but were previously
290 # removed from .hgsub
292 # removed from .hgsub
291 c = repo['.']
293 c = repo['.']
292 subs = c.substate
294 subs = c.substate
293 for s in sorted(subs):
295 for s in sorted(subs):
294 c.sub(s).unshare()
296 c.sub(s).unshare()
295
297
296 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
298 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
297 """Called after a new shared repo is created.
299 """Called after a new shared repo is created.
298
300
299 The new repo only has a requirements file and pointer to the source.
301 The new repo only has a requirements file and pointer to the source.
300 This function configures additional shared data.
302 This function configures additional shared data.
301
303
302 Extensions can wrap this function and write additional entries to
304 Extensions can wrap this function and write additional entries to
303 destrepo/.hg/shared to indicate additional pieces of data to be shared.
305 destrepo/.hg/shared to indicate additional pieces of data to be shared.
304 """
306 """
305 default = defaultpath or sourcerepo.ui.config('paths', 'default')
307 default = defaultpath or sourcerepo.ui.config('paths', 'default')
306 if default:
308 if default:
307 fp = destrepo.vfs("hgrc", "w", text=True)
309 fp = destrepo.vfs("hgrc", "w", text=True)
308 fp.write("[paths]\n")
310 fp.write("[paths]\n")
309 fp.write("default = %s\n" % default)
311 fp.write("default = %s\n" % default)
310 fp.close()
312 fp.close()
311
313
312 with destrepo.wlock():
314 with destrepo.wlock():
313 if bookmarks:
315 if bookmarks:
314 fp = destrepo.vfs('shared', 'w')
316 fp = destrepo.vfs('shared', 'w')
315 fp.write(sharedbookmarks + '\n')
317 fp.write(sharedbookmarks + '\n')
316 fp.close()
318 fp.close()
317
319
318 def _postshareupdate(repo, update, checkout=None):
320 def _postshareupdate(repo, update, checkout=None):
319 """Maybe perform a working directory update after a shared repo is created.
321 """Maybe perform a working directory update after a shared repo is created.
320
322
321 ``update`` can be a boolean or a revision to update to.
323 ``update`` can be a boolean or a revision to update to.
322 """
324 """
323 if not update:
325 if not update:
324 return
326 return
325
327
326 repo.ui.status(_("updating working directory\n"))
328 repo.ui.status(_("updating working directory\n"))
327 if update is not True:
329 if update is not True:
328 checkout = update
330 checkout = update
329 for test in (checkout, 'default', 'tip'):
331 for test in (checkout, 'default', 'tip'):
330 if test is None:
332 if test is None:
331 continue
333 continue
332 try:
334 try:
333 uprev = repo.lookup(test)
335 uprev = repo.lookup(test)
334 break
336 break
335 except error.RepoLookupError:
337 except error.RepoLookupError:
336 continue
338 continue
337 _update(repo, uprev)
339 _update(repo, uprev)
338
340
339 def copystore(ui, srcrepo, destpath):
341 def copystore(ui, srcrepo, destpath):
340 '''copy files from store of srcrepo in destpath
342 '''copy files from store of srcrepo in destpath
341
343
342 returns destlock
344 returns destlock
343 '''
345 '''
344 destlock = None
346 destlock = None
345 try:
347 try:
346 hardlink = None
348 hardlink = None
347 num = 0
349 num = 0
348 closetopic = [None]
350 closetopic = [None]
349 def prog(topic, pos):
351 def prog(topic, pos):
350 if pos is None:
352 if pos is None:
351 closetopic[0] = topic
353 closetopic[0] = topic
352 else:
354 else:
353 ui.progress(topic, pos + num)
355 ui.progress(topic, pos + num)
354 srcpublishing = srcrepo.publishing()
356 srcpublishing = srcrepo.publishing()
355 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
357 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
356 dstvfs = vfsmod.vfs(destpath)
358 dstvfs = vfsmod.vfs(destpath)
357 for f in srcrepo.store.copylist():
359 for f in srcrepo.store.copylist():
358 if srcpublishing and f.endswith('phaseroots'):
360 if srcpublishing and f.endswith('phaseroots'):
359 continue
361 continue
360 dstbase = os.path.dirname(f)
362 dstbase = os.path.dirname(f)
361 if dstbase and not dstvfs.exists(dstbase):
363 if dstbase and not dstvfs.exists(dstbase):
362 dstvfs.mkdir(dstbase)
364 dstvfs.mkdir(dstbase)
363 if srcvfs.exists(f):
365 if srcvfs.exists(f):
364 if f.endswith('data'):
366 if f.endswith('data'):
365 # 'dstbase' may be empty (e.g. revlog format 0)
367 # 'dstbase' may be empty (e.g. revlog format 0)
366 lockfile = os.path.join(dstbase, "lock")
368 lockfile = os.path.join(dstbase, "lock")
367 # lock to avoid premature writing to the target
369 # lock to avoid premature writing to the target
368 destlock = lock.lock(dstvfs, lockfile)
370 destlock = lock.lock(dstvfs, lockfile)
369 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
371 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
370 hardlink, progress=prog)
372 hardlink, progress=prog)
371 num += n
373 num += n
372 if hardlink:
374 if hardlink:
373 ui.debug("linked %d files\n" % num)
375 ui.debug("linked %d files\n" % num)
374 if closetopic[0]:
376 if closetopic[0]:
375 ui.progress(closetopic[0], None)
377 ui.progress(closetopic[0], None)
376 else:
378 else:
377 ui.debug("copied %d files\n" % num)
379 ui.debug("copied %d files\n" % num)
378 if closetopic[0]:
380 if closetopic[0]:
379 ui.progress(closetopic[0], None)
381 ui.progress(closetopic[0], None)
380 return destlock
382 return destlock
381 except: # re-raises
383 except: # re-raises
382 release(destlock)
384 release(destlock)
383 raise
385 raise
384
386
385 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
387 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
386 rev=None, update=True, stream=False):
388 rev=None, update=True, stream=False):
387 """Perform a clone using a shared repo.
389 """Perform a clone using a shared repo.
388
390
389 The store for the repository will be located at <sharepath>/.hg. The
391 The store for the repository will be located at <sharepath>/.hg. The
390 specified revisions will be cloned or pulled from "source". A shared repo
392 specified revisions will be cloned or pulled from "source". A shared repo
391 will be created at "dest" and a working copy will be created if "update" is
393 will be created at "dest" and a working copy will be created if "update" is
392 True.
394 True.
393 """
395 """
394 revs = None
396 revs = None
395 if rev:
397 if rev:
396 if not srcpeer.capable('lookup'):
398 if not srcpeer.capable('lookup'):
397 raise error.Abort(_("src repository does not support "
399 raise error.Abort(_("src repository does not support "
398 "revision lookup and so doesn't "
400 "revision lookup and so doesn't "
399 "support clone by revision"))
401 "support clone by revision"))
400 revs = [srcpeer.lookup(r) for r in rev]
402 revs = [srcpeer.lookup(r) for r in rev]
401
403
402 # Obtain a lock before checking for or cloning the pooled repo otherwise
404 # Obtain a lock before checking for or cloning the pooled repo otherwise
403 # 2 clients may race creating or populating it.
405 # 2 clients may race creating or populating it.
404 pooldir = os.path.dirname(sharepath)
406 pooldir = os.path.dirname(sharepath)
405 # lock class requires the directory to exist.
407 # lock class requires the directory to exist.
406 try:
408 try:
407 util.makedir(pooldir, False)
409 util.makedir(pooldir, False)
408 except OSError as e:
410 except OSError as e:
409 if e.errno != errno.EEXIST:
411 if e.errno != errno.EEXIST:
410 raise
412 raise
411
413
412 poolvfs = vfsmod.vfs(pooldir)
414 poolvfs = vfsmod.vfs(pooldir)
413 basename = os.path.basename(sharepath)
415 basename = os.path.basename(sharepath)
414
416
415 with lock.lock(poolvfs, '%s.lock' % basename):
417 with lock.lock(poolvfs, '%s.lock' % basename):
416 if os.path.exists(sharepath):
418 if os.path.exists(sharepath):
417 ui.status(_('(sharing from existing pooled repository %s)\n') %
419 ui.status(_('(sharing from existing pooled repository %s)\n') %
418 basename)
420 basename)
419 else:
421 else:
420 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
422 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
421 # Always use pull mode because hardlinks in share mode don't work
423 # Always use pull mode because hardlinks in share mode don't work
422 # well. Never update because working copies aren't necessary in
424 # well. Never update because working copies aren't necessary in
423 # share mode.
425 # share mode.
424 clone(ui, peeropts, source, dest=sharepath, pull=True,
426 clone(ui, peeropts, source, dest=sharepath, pull=True,
425 rev=rev, update=False, stream=stream)
427 rev=rev, update=False, stream=stream)
426
428
427 # Resolve the value to put in [paths] section for the source.
429 # Resolve the value to put in [paths] section for the source.
428 if islocal(source):
430 if islocal(source):
429 defaultpath = os.path.abspath(util.urllocalpath(source))
431 defaultpath = os.path.abspath(util.urllocalpath(source))
430 else:
432 else:
431 defaultpath = source
433 defaultpath = source
432
434
433 sharerepo = repository(ui, path=sharepath)
435 sharerepo = repository(ui, path=sharepath)
434 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
436 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
435 defaultpath=defaultpath)
437 defaultpath=defaultpath)
436
438
437 # We need to perform a pull against the dest repo to fetch bookmarks
439 # We need to perform a pull against the dest repo to fetch bookmarks
438 # and other non-store data that isn't shared by default. In the case of
440 # and other non-store data that isn't shared by default. In the case of
439 # non-existing shared repo, this means we pull from the remote twice. This
441 # non-existing shared repo, this means we pull from the remote twice. This
440 # is a bit weird. But at the time it was implemented, there wasn't an easy
442 # is a bit weird. But at the time it was implemented, there wasn't an easy
441 # way to pull just non-changegroup data.
443 # way to pull just non-changegroup data.
442 destrepo = repository(ui, path=dest)
444 destrepo = repository(ui, path=dest)
443 exchange.pull(destrepo, srcpeer, heads=revs)
445 exchange.pull(destrepo, srcpeer, heads=revs)
444
446
445 _postshareupdate(destrepo, update)
447 _postshareupdate(destrepo, update)
446
448
447 return srcpeer, peer(ui, peeropts, dest)
449 return srcpeer, peer(ui, peeropts, dest)
448
450
449 # Recomputing branch cache might be slow on big repos,
451 # Recomputing branch cache might be slow on big repos,
450 # so just copy it
452 # so just copy it
451 def _copycache(srcrepo, dstcachedir, fname):
453 def _copycache(srcrepo, dstcachedir, fname):
452 """copy a cache from srcrepo to destcachedir (if it exists)"""
454 """copy a cache from srcrepo to destcachedir (if it exists)"""
453 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
455 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
454 dstbranchcache = os.path.join(dstcachedir, fname)
456 dstbranchcache = os.path.join(dstcachedir, fname)
455 if os.path.exists(srcbranchcache):
457 if os.path.exists(srcbranchcache):
456 if not os.path.exists(dstcachedir):
458 if not os.path.exists(dstcachedir):
457 os.mkdir(dstcachedir)
459 os.mkdir(dstcachedir)
458 util.copyfile(srcbranchcache, dstbranchcache)
460 util.copyfile(srcbranchcache, dstbranchcache)
459
461
460 def _cachetocopy(srcrepo):
462 def _cachetocopy(srcrepo):
461 """return the list of cache file valuable to copy during a clone"""
463 """return the list of cache file valuable to copy during a clone"""
462 # In local clones we're copying all nodes, not just served
464 # In local clones we're copying all nodes, not just served
463 # ones. Therefore copy all branch caches over.
465 # ones. Therefore copy all branch caches over.
464 cachefiles = ['branch2']
466 cachefiles = ['branch2']
465 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
467 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
466 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
468 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
467 cachefiles += ['tags2']
469 cachefiles += ['tags2']
468 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
470 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
469 cachefiles += ['hgtagsfnodes1']
471 cachefiles += ['hgtagsfnodes1']
470 return cachefiles
472 return cachefiles
471
473
472 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
474 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
473 update=True, stream=False, branch=None, shareopts=None):
475 update=True, stream=False, branch=None, shareopts=None):
474 """Make a copy of an existing repository.
476 """Make a copy of an existing repository.
475
477
476 Create a copy of an existing repository in a new directory. The
478 Create a copy of an existing repository in a new directory. The
477 source and destination are URLs, as passed to the repository
479 source and destination are URLs, as passed to the repository
478 function. Returns a pair of repository peers, the source and
480 function. Returns a pair of repository peers, the source and
479 newly created destination.
481 newly created destination.
480
482
481 The location of the source is added to the new repository's
483 The location of the source is added to the new repository's
482 .hg/hgrc file, as the default to be used for future pulls and
484 .hg/hgrc file, as the default to be used for future pulls and
483 pushes.
485 pushes.
484
486
485 If an exception is raised, the partly cloned/updated destination
487 If an exception is raised, the partly cloned/updated destination
486 repository will be deleted.
488 repository will be deleted.
487
489
488 Arguments:
490 Arguments:
489
491
490 source: repository object or URL
492 source: repository object or URL
491
493
492 dest: URL of destination repository to create (defaults to base
494 dest: URL of destination repository to create (defaults to base
493 name of source repository)
495 name of source repository)
494
496
495 pull: always pull from source repository, even in local case or if the
497 pull: always pull from source repository, even in local case or if the
496 server prefers streaming
498 server prefers streaming
497
499
498 stream: stream raw data uncompressed from repository (fast over
500 stream: stream raw data uncompressed from repository (fast over
499 LAN, slow over WAN)
501 LAN, slow over WAN)
500
502
501 rev: revision to clone up to (implies pull=True)
503 rev: revision to clone up to (implies pull=True)
502
504
503 update: update working directory after clone completes, if
505 update: update working directory after clone completes, if
504 destination is local repository (True means update to default rev,
506 destination is local repository (True means update to default rev,
505 anything else is treated as a revision)
507 anything else is treated as a revision)
506
508
507 branch: branches to clone
509 branch: branches to clone
508
510
509 shareopts: dict of options to control auto sharing behavior. The "pool" key
511 shareopts: dict of options to control auto sharing behavior. The "pool" key
510 activates auto sharing mode and defines the directory for stores. The
512 activates auto sharing mode and defines the directory for stores. The
511 "mode" key determines how to construct the directory name of the shared
513 "mode" key determines how to construct the directory name of the shared
512 repository. "identity" means the name is derived from the node of the first
514 repository. "identity" means the name is derived from the node of the first
513 changeset in the repository. "remote" means the name is derived from the
515 changeset in the repository. "remote" means the name is derived from the
514 remote's path/URL. Defaults to "identity."
516 remote's path/URL. Defaults to "identity."
515 """
517 """
516
518
517 if isinstance(source, bytes):
519 if isinstance(source, bytes):
518 origsource = ui.expandpath(source)
520 origsource = ui.expandpath(source)
519 source, branch = parseurl(origsource, branch)
521 source, branch = parseurl(origsource, branch)
520 srcpeer = peer(ui, peeropts, source)
522 srcpeer = peer(ui, peeropts, source)
521 else:
523 else:
522 srcpeer = source.peer() # in case we were called with a localrepo
524 srcpeer = source.peer() # in case we were called with a localrepo
523 branch = (None, branch or [])
525 branch = (None, branch or [])
524 origsource = source = srcpeer.url()
526 origsource = source = srcpeer.url()
525 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
527 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
526
528
527 if dest is None:
529 if dest is None:
528 dest = defaultdest(source)
530 dest = defaultdest(source)
529 if dest:
531 if dest:
530 ui.status(_("destination directory: %s\n") % dest)
532 ui.status(_("destination directory: %s\n") % dest)
531 else:
533 else:
532 dest = ui.expandpath(dest)
534 dest = ui.expandpath(dest)
533
535
534 dest = util.urllocalpath(dest)
536 dest = util.urllocalpath(dest)
535 source = util.urllocalpath(source)
537 source = util.urllocalpath(source)
536
538
537 if not dest:
539 if not dest:
538 raise error.Abort(_("empty destination path is not valid"))
540 raise error.Abort(_("empty destination path is not valid"))
539
541
540 destvfs = vfsmod.vfs(dest, expandpath=True)
542 destvfs = vfsmod.vfs(dest, expandpath=True)
541 if destvfs.lexists():
543 if destvfs.lexists():
542 if not destvfs.isdir():
544 if not destvfs.isdir():
543 raise error.Abort(_("destination '%s' already exists") % dest)
545 raise error.Abort(_("destination '%s' already exists") % dest)
544 elif destvfs.listdir():
546 elif destvfs.listdir():
545 raise error.Abort(_("destination '%s' is not empty") % dest)
547 raise error.Abort(_("destination '%s' is not empty") % dest)
546
548
547 shareopts = shareopts or {}
549 shareopts = shareopts or {}
548 sharepool = shareopts.get('pool')
550 sharepool = shareopts.get('pool')
549 sharenamemode = shareopts.get('mode')
551 sharenamemode = shareopts.get('mode')
550 if sharepool and islocal(dest):
552 if sharepool and islocal(dest):
551 sharepath = None
553 sharepath = None
552 if sharenamemode == 'identity':
554 if sharenamemode == 'identity':
553 # Resolve the name from the initial changeset in the remote
555 # Resolve the name from the initial changeset in the remote
554 # repository. This returns nullid when the remote is empty. It
556 # repository. This returns nullid when the remote is empty. It
555 # raises RepoLookupError if revision 0 is filtered or otherwise
557 # raises RepoLookupError if revision 0 is filtered or otherwise
556 # not available. If we fail to resolve, sharing is not enabled.
558 # not available. If we fail to resolve, sharing is not enabled.
557 try:
559 try:
558 rootnode = srcpeer.lookup('0')
560 rootnode = srcpeer.lookup('0')
559 if rootnode != node.nullid:
561 if rootnode != node.nullid:
560 sharepath = os.path.join(sharepool, node.hex(rootnode))
562 sharepath = os.path.join(sharepool, node.hex(rootnode))
561 else:
563 else:
562 ui.status(_('(not using pooled storage: '
564 ui.status(_('(not using pooled storage: '
563 'remote appears to be empty)\n'))
565 'remote appears to be empty)\n'))
564 except error.RepoLookupError:
566 except error.RepoLookupError:
565 ui.status(_('(not using pooled storage: '
567 ui.status(_('(not using pooled storage: '
566 'unable to resolve identity of remote)\n'))
568 'unable to resolve identity of remote)\n'))
567 elif sharenamemode == 'remote':
569 elif sharenamemode == 'remote':
568 sharepath = os.path.join(
570 sharepath = os.path.join(
569 sharepool, hashlib.sha1(source).hexdigest())
571 sharepool, hashlib.sha1(source).hexdigest())
570 else:
572 else:
571 raise error.Abort(_('unknown share naming mode: %s') %
573 raise error.Abort(_('unknown share naming mode: %s') %
572 sharenamemode)
574 sharenamemode)
573
575
574 if sharepath:
576 if sharepath:
575 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
577 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
576 dest, pull=pull, rev=rev, update=update,
578 dest, pull=pull, rev=rev, update=update,
577 stream=stream)
579 stream=stream)
578
580
579 srclock = destlock = cleandir = None
581 srclock = destlock = cleandir = None
580 srcrepo = srcpeer.local()
582 srcrepo = srcpeer.local()
581 try:
583 try:
582 abspath = origsource
584 abspath = origsource
583 if islocal(origsource):
585 if islocal(origsource):
584 abspath = os.path.abspath(util.urllocalpath(origsource))
586 abspath = os.path.abspath(util.urllocalpath(origsource))
585
587
586 if islocal(dest):
588 if islocal(dest):
587 cleandir = dest
589 cleandir = dest
588
590
589 copy = False
591 copy = False
590 if (srcrepo and srcrepo.cancopy() and islocal(dest)
592 if (srcrepo and srcrepo.cancopy() and islocal(dest)
591 and not phases.hassecret(srcrepo)):
593 and not phases.hassecret(srcrepo)):
592 copy = not pull and not rev
594 copy = not pull and not rev
593
595
594 if copy:
596 if copy:
595 try:
597 try:
596 # we use a lock here because if we race with commit, we
598 # we use a lock here because if we race with commit, we
597 # can end up with extra data in the cloned revlogs that's
599 # can end up with extra data in the cloned revlogs that's
598 # not pointed to by changesets, thus causing verify to
600 # not pointed to by changesets, thus causing verify to
599 # fail
601 # fail
600 srclock = srcrepo.lock(wait=False)
602 srclock = srcrepo.lock(wait=False)
601 except error.LockError:
603 except error.LockError:
602 copy = False
604 copy = False
603
605
604 if copy:
606 if copy:
605 srcrepo.hook('preoutgoing', throw=True, source='clone')
607 srcrepo.hook('preoutgoing', throw=True, source='clone')
606 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
608 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
607 if not os.path.exists(dest):
609 if not os.path.exists(dest):
608 os.mkdir(dest)
610 os.mkdir(dest)
609 else:
611 else:
610 # only clean up directories we create ourselves
612 # only clean up directories we create ourselves
611 cleandir = hgdir
613 cleandir = hgdir
612 try:
614 try:
613 destpath = hgdir
615 destpath = hgdir
614 util.makedir(destpath, notindexed=True)
616 util.makedir(destpath, notindexed=True)
615 except OSError as inst:
617 except OSError as inst:
616 if inst.errno == errno.EEXIST:
618 if inst.errno == errno.EEXIST:
617 cleandir = None
619 cleandir = None
618 raise error.Abort(_("destination '%s' already exists")
620 raise error.Abort(_("destination '%s' already exists")
619 % dest)
621 % dest)
620 raise
622 raise
621
623
622 destlock = copystore(ui, srcrepo, destpath)
624 destlock = copystore(ui, srcrepo, destpath)
623 # copy bookmarks over
625 # copy bookmarks over
624 srcbookmarks = srcrepo.vfs.join('bookmarks')
626 srcbookmarks = srcrepo.vfs.join('bookmarks')
625 dstbookmarks = os.path.join(destpath, 'bookmarks')
627 dstbookmarks = os.path.join(destpath, 'bookmarks')
626 if os.path.exists(srcbookmarks):
628 if os.path.exists(srcbookmarks):
627 util.copyfile(srcbookmarks, dstbookmarks)
629 util.copyfile(srcbookmarks, dstbookmarks)
628
630
629 dstcachedir = os.path.join(destpath, 'cache')
631 dstcachedir = os.path.join(destpath, 'cache')
630 for cache in _cachetocopy(srcrepo):
632 for cache in _cachetocopy(srcrepo):
631 _copycache(srcrepo, dstcachedir, cache)
633 _copycache(srcrepo, dstcachedir, cache)
632
634
633 # we need to re-init the repo after manually copying the data
635 # we need to re-init the repo after manually copying the data
634 # into it
636 # into it
635 destpeer = peer(srcrepo, peeropts, dest)
637 destpeer = peer(srcrepo, peeropts, dest)
636 srcrepo.hook('outgoing', source='clone',
638 srcrepo.hook('outgoing', source='clone',
637 node=node.hex(node.nullid))
639 node=node.hex(node.nullid))
638 else:
640 else:
639 try:
641 try:
640 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
642 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
641 # only pass ui when no srcrepo
643 # only pass ui when no srcrepo
642 except OSError as inst:
644 except OSError as inst:
643 if inst.errno == errno.EEXIST:
645 if inst.errno == errno.EEXIST:
644 cleandir = None
646 cleandir = None
645 raise error.Abort(_("destination '%s' already exists")
647 raise error.Abort(_("destination '%s' already exists")
646 % dest)
648 % dest)
647 raise
649 raise
648
650
649 revs = None
651 revs = None
650 if rev:
652 if rev:
651 if not srcpeer.capable('lookup'):
653 if not srcpeer.capable('lookup'):
652 raise error.Abort(_("src repository does not support "
654 raise error.Abort(_("src repository does not support "
653 "revision lookup and so doesn't "
655 "revision lookup and so doesn't "
654 "support clone by revision"))
656 "support clone by revision"))
655 revs = [srcpeer.lookup(r) for r in rev]
657 revs = [srcpeer.lookup(r) for r in rev]
656 checkout = revs[0]
658 checkout = revs[0]
657 local = destpeer.local()
659 local = destpeer.local()
658 if local:
660 if local:
659 if not stream:
661 if not stream:
660 if pull:
662 if pull:
661 stream = False
663 stream = False
662 else:
664 else:
663 stream = None
665 stream = None
664 # internal config: ui.quietbookmarkmove
666 # internal config: ui.quietbookmarkmove
665 overrides = {('ui', 'quietbookmarkmove'): True}
667 overrides = {('ui', 'quietbookmarkmove'): True}
666 with local.ui.configoverride(overrides, 'clone'):
668 with local.ui.configoverride(overrides, 'clone'):
667 exchange.pull(local, srcpeer, revs,
669 exchange.pull(local, srcpeer, revs,
668 streamclonerequested=stream)
670 streamclonerequested=stream)
669 elif srcrepo:
671 elif srcrepo:
670 exchange.push(srcrepo, destpeer, revs=revs,
672 exchange.push(srcrepo, destpeer, revs=revs,
671 bookmarks=srcrepo._bookmarks.keys())
673 bookmarks=srcrepo._bookmarks.keys())
672 else:
674 else:
673 raise error.Abort(_("clone from remote to remote not supported")
675 raise error.Abort(_("clone from remote to remote not supported")
674 )
676 )
675
677
676 cleandir = None
678 cleandir = None
677
679
678 destrepo = destpeer.local()
680 destrepo = destpeer.local()
679 if destrepo:
681 if destrepo:
680 template = uimod.samplehgrcs['cloned']
682 template = uimod.samplehgrcs['cloned']
681 fp = destrepo.vfs("hgrc", "wb")
683 fp = destrepo.vfs("hgrc", "wb")
682 u = util.url(abspath)
684 u = util.url(abspath)
683 u.passwd = None
685 u.passwd = None
684 defaulturl = bytes(u)
686 defaulturl = bytes(u)
685 fp.write(util.tonativeeol(template % defaulturl))
687 fp.write(util.tonativeeol(template % defaulturl))
686 fp.close()
688 fp.close()
687
689
688 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
690 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
689
691
690 if update:
692 if update:
691 if update is not True:
693 if update is not True:
692 checkout = srcpeer.lookup(update)
694 checkout = srcpeer.lookup(update)
693 uprev = None
695 uprev = None
694 status = None
696 status = None
695 if checkout is not None:
697 if checkout is not None:
696 try:
698 try:
697 uprev = destrepo.lookup(checkout)
699 uprev = destrepo.lookup(checkout)
698 except error.RepoLookupError:
700 except error.RepoLookupError:
699 if update is not True:
701 if update is not True:
700 try:
702 try:
701 uprev = destrepo.lookup(update)
703 uprev = destrepo.lookup(update)
702 except error.RepoLookupError:
704 except error.RepoLookupError:
703 pass
705 pass
704 if uprev is None:
706 if uprev is None:
705 try:
707 try:
706 uprev = destrepo._bookmarks['@']
708 uprev = destrepo._bookmarks['@']
707 update = '@'
709 update = '@'
708 bn = destrepo[uprev].branch()
710 bn = destrepo[uprev].branch()
709 if bn == 'default':
711 if bn == 'default':
710 status = _("updating to bookmark @\n")
712 status = _("updating to bookmark @\n")
711 else:
713 else:
712 status = (_("updating to bookmark @ on branch %s\n")
714 status = (_("updating to bookmark @ on branch %s\n")
713 % bn)
715 % bn)
714 except KeyError:
716 except KeyError:
715 try:
717 try:
716 uprev = destrepo.branchtip('default')
718 uprev = destrepo.branchtip('default')
717 except error.RepoLookupError:
719 except error.RepoLookupError:
718 uprev = destrepo.lookup('tip')
720 uprev = destrepo.lookup('tip')
719 if not status:
721 if not status:
720 bn = destrepo[uprev].branch()
722 bn = destrepo[uprev].branch()
721 status = _("updating to branch %s\n") % bn
723 status = _("updating to branch %s\n") % bn
722 destrepo.ui.status(status)
724 destrepo.ui.status(status)
723 _update(destrepo, uprev)
725 _update(destrepo, uprev)
724 if update in destrepo._bookmarks:
726 if update in destrepo._bookmarks:
725 bookmarks.activate(destrepo, update)
727 bookmarks.activate(destrepo, update)
726 finally:
728 finally:
727 release(srclock, destlock)
729 release(srclock, destlock)
728 if cleandir is not None:
730 if cleandir is not None:
729 shutil.rmtree(cleandir, True)
731 shutil.rmtree(cleandir, True)
730 if srcpeer is not None:
732 if srcpeer is not None:
731 srcpeer.close()
733 srcpeer.close()
732 return srcpeer, destpeer
734 return srcpeer, destpeer
733
735
734 def _showstats(repo, stats, quietempty=False):
736 def _showstats(repo, stats, quietempty=False):
735 if quietempty and not any(stats):
737 if quietempty and not any(stats):
736 return
738 return
737 repo.ui.status(_("%d files updated, %d files merged, "
739 repo.ui.status(_("%d files updated, %d files merged, "
738 "%d files removed, %d files unresolved\n") % stats)
740 "%d files removed, %d files unresolved\n") % stats)
739
741
740 def updaterepo(repo, node, overwrite, updatecheck=None):
742 def updaterepo(repo, node, overwrite, updatecheck=None):
741 """Update the working directory to node.
743 """Update the working directory to node.
742
744
743 When overwrite is set, changes are clobbered, merged else
745 When overwrite is set, changes are clobbered, merged else
744
746
745 returns stats (see pydoc mercurial.merge.applyupdates)"""
747 returns stats (see pydoc mercurial.merge.applyupdates)"""
746 return mergemod.update(repo, node, False, overwrite,
748 return mergemod.update(repo, node, False, overwrite,
747 labels=['working copy', 'destination'],
749 labels=['working copy', 'destination'],
748 updatecheck=updatecheck)
750 updatecheck=updatecheck)
749
751
750 def update(repo, node, quietempty=False, updatecheck=None):
752 def update(repo, node, quietempty=False, updatecheck=None):
751 """update the working directory to node"""
753 """update the working directory to node"""
752 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
754 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
753 _showstats(repo, stats, quietempty)
755 _showstats(repo, stats, quietempty)
754 if stats[3]:
756 if stats[3]:
755 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
757 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
756 return stats[3] > 0
758 return stats[3] > 0
757
759
758 # naming conflict in clone()
760 # naming conflict in clone()
759 _update = update
761 _update = update
760
762
761 def clean(repo, node, show_stats=True, quietempty=False):
763 def clean(repo, node, show_stats=True, quietempty=False):
762 """forcibly switch the working directory to node, clobbering changes"""
764 """forcibly switch the working directory to node, clobbering changes"""
763 stats = updaterepo(repo, node, True)
765 stats = updaterepo(repo, node, True)
764 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
766 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
765 if show_stats:
767 if show_stats:
766 _showstats(repo, stats, quietempty)
768 _showstats(repo, stats, quietempty)
767 return stats[3] > 0
769 return stats[3] > 0
768
770
769 # naming conflict in updatetotally()
771 # naming conflict in updatetotally()
770 _clean = clean
772 _clean = clean
771
773
772 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
774 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
773 """Update the working directory with extra care for non-file components
775 """Update the working directory with extra care for non-file components
774
776
775 This takes care of non-file components below:
777 This takes care of non-file components below:
776
778
777 :bookmark: might be advanced or (in)activated
779 :bookmark: might be advanced or (in)activated
778
780
779 This takes arguments below:
781 This takes arguments below:
780
782
781 :checkout: to which revision the working directory is updated
783 :checkout: to which revision the working directory is updated
782 :brev: a name, which might be a bookmark to be activated after updating
784 :brev: a name, which might be a bookmark to be activated after updating
783 :clean: whether changes in the working directory can be discarded
785 :clean: whether changes in the working directory can be discarded
784 :updatecheck: how to deal with a dirty working directory
786 :updatecheck: how to deal with a dirty working directory
785
787
786 Valid values for updatecheck are (None => linear):
788 Valid values for updatecheck are (None => linear):
787
789
788 * abort: abort if the working directory is dirty
790 * abort: abort if the working directory is dirty
789 * none: don't check (merge working directory changes into destination)
791 * none: don't check (merge working directory changes into destination)
790 * linear: check that update is linear before merging working directory
792 * linear: check that update is linear before merging working directory
791 changes into destination
793 changes into destination
792 * noconflict: check that the update does not result in file merges
794 * noconflict: check that the update does not result in file merges
793
795
794 This returns whether conflict is detected at updating or not.
796 This returns whether conflict is detected at updating or not.
795 """
797 """
796 if updatecheck is None:
798 if updatecheck is None:
797 updatecheck = ui.config('commands', 'update.check')
799 updatecheck = ui.config('commands', 'update.check')
798 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
800 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
799 # If not configured, or invalid value configured
801 # If not configured, or invalid value configured
800 updatecheck = 'linear'
802 updatecheck = 'linear'
801 with repo.wlock():
803 with repo.wlock():
802 movemarkfrom = None
804 movemarkfrom = None
803 warndest = False
805 warndest = False
804 if checkout is None:
806 if checkout is None:
805 updata = destutil.destupdate(repo, clean=clean)
807 updata = destutil.destupdate(repo, clean=clean)
806 checkout, movemarkfrom, brev = updata
808 checkout, movemarkfrom, brev = updata
807 warndest = True
809 warndest = True
808
810
809 if clean:
811 if clean:
810 ret = _clean(repo, checkout)
812 ret = _clean(repo, checkout)
811 else:
813 else:
812 if updatecheck == 'abort':
814 if updatecheck == 'abort':
813 cmdutil.bailifchanged(repo, merge=False)
815 cmdutil.bailifchanged(repo, merge=False)
814 updatecheck = 'none'
816 updatecheck = 'none'
815 ret = _update(repo, checkout, updatecheck=updatecheck)
817 ret = _update(repo, checkout, updatecheck=updatecheck)
816
818
817 if not ret and movemarkfrom:
819 if not ret and movemarkfrom:
818 if movemarkfrom == repo['.'].node():
820 if movemarkfrom == repo['.'].node():
819 pass # no-op update
821 pass # no-op update
820 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
822 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
821 b = ui.label(repo._activebookmark, 'bookmarks.active')
823 b = ui.label(repo._activebookmark, 'bookmarks.active')
822 ui.status(_("updating bookmark %s\n") % b)
824 ui.status(_("updating bookmark %s\n") % b)
823 else:
825 else:
824 # this can happen with a non-linear update
826 # this can happen with a non-linear update
825 b = ui.label(repo._activebookmark, 'bookmarks')
827 b = ui.label(repo._activebookmark, 'bookmarks')
826 ui.status(_("(leaving bookmark %s)\n") % b)
828 ui.status(_("(leaving bookmark %s)\n") % b)
827 bookmarks.deactivate(repo)
829 bookmarks.deactivate(repo)
828 elif brev in repo._bookmarks:
830 elif brev in repo._bookmarks:
829 if brev != repo._activebookmark:
831 if brev != repo._activebookmark:
830 b = ui.label(brev, 'bookmarks.active')
832 b = ui.label(brev, 'bookmarks.active')
831 ui.status(_("(activating bookmark %s)\n") % b)
833 ui.status(_("(activating bookmark %s)\n") % b)
832 bookmarks.activate(repo, brev)
834 bookmarks.activate(repo, brev)
833 elif brev:
835 elif brev:
834 if repo._activebookmark:
836 if repo._activebookmark:
835 b = ui.label(repo._activebookmark, 'bookmarks')
837 b = ui.label(repo._activebookmark, 'bookmarks')
836 ui.status(_("(leaving bookmark %s)\n") % b)
838 ui.status(_("(leaving bookmark %s)\n") % b)
837 bookmarks.deactivate(repo)
839 bookmarks.deactivate(repo)
838
840
839 if warndest:
841 if warndest:
840 destutil.statusotherdests(ui, repo)
842 destutil.statusotherdests(ui, repo)
841
843
842 return ret
844 return ret
843
845
844 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
846 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
845 """Branch merge with node, resolving changes. Return true if any
847 """Branch merge with node, resolving changes. Return true if any
846 unresolved conflicts."""
848 unresolved conflicts."""
847 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
849 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
848 labels=labels)
850 labels=labels)
849 _showstats(repo, stats)
851 _showstats(repo, stats)
850 if stats[3]:
852 if stats[3]:
851 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
853 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
852 "or 'hg update -C .' to abandon\n"))
854 "or 'hg update -C .' to abandon\n"))
853 elif remind:
855 elif remind:
854 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
856 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
855 return stats[3] > 0
857 return stats[3] > 0
856
858
857 def _incoming(displaychlist, subreporecurse, ui, repo, source,
859 def _incoming(displaychlist, subreporecurse, ui, repo, source,
858 opts, buffered=False):
860 opts, buffered=False):
859 """
861 """
860 Helper for incoming / gincoming.
862 Helper for incoming / gincoming.
861 displaychlist gets called with
863 displaychlist gets called with
862 (remoterepo, incomingchangesetlist, displayer) parameters,
864 (remoterepo, incomingchangesetlist, displayer) parameters,
863 and is supposed to contain only code that can't be unified.
865 and is supposed to contain only code that can't be unified.
864 """
866 """
865 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
867 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
866 other = peer(repo, opts, source)
868 other = peer(repo, opts, source)
867 ui.status(_('comparing with %s\n') % util.hidepassword(source))
869 ui.status(_('comparing with %s\n') % util.hidepassword(source))
868 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
870 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
869
871
870 if revs:
872 if revs:
871 revs = [other.lookup(rev) for rev in revs]
873 revs = [other.lookup(rev) for rev in revs]
872 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
874 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
873 revs, opts["bundle"], opts["force"])
875 revs, opts["bundle"], opts["force"])
874 try:
876 try:
875 if not chlist:
877 if not chlist:
876 ui.status(_("no changes found\n"))
878 ui.status(_("no changes found\n"))
877 return subreporecurse()
879 return subreporecurse()
878 ui.pager('incoming')
880 ui.pager('incoming')
879 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
881 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
880 displaychlist(other, chlist, displayer)
882 displaychlist(other, chlist, displayer)
881 displayer.close()
883 displayer.close()
882 finally:
884 finally:
883 cleanupfn()
885 cleanupfn()
884 subreporecurse()
886 subreporecurse()
885 return 0 # exit code is zero since we found incoming changes
887 return 0 # exit code is zero since we found incoming changes
886
888
def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
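    # Note: the outgoing object's 'missing' attribute is understood to hold
    # the nodes present locally but not remotely, while 'excluded' holds
    # changesets filtered out of the push set (e.g. secret-phase ones),
    # which is what nochangesfound() reports on below.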
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)
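    # Note: verifymod.verify() is understood to return a nonzero value when
    # integrity problems are found and 0 otherwise; the subrepo checks below
    # can only raise ret to a nonzero value via the 'or ret' pattern, never
    # clear it.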

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst
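
# Illustrative sketch (hypothetical names, not part of the module): a caller
# contacting an ssh peer with a custom remote command might combine
# remoteui() with command-line style opts roughly like this:
#
#     remote = remoteui(somerepo, {'ssh': 'ssh -C', 'remotecmd': 'hg'})
#     remote.config('ui', 'remotecmd')    # -> 'hg'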

# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]
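
# Note: cachedlocalrepo._repostate() below is expected to turn this list into
# a snapshot of one (mtime, size) pair per entry, together with the newest
# mtime seen, so that two snapshots can be compared cheaply for equality.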

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
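
# Illustrative sketch (hypothetical names, not part of the module): a
# long-lived process, such as a web server, might reuse a cachedlocalrepo
# roughly like this:
#
#     cache = cachedlocalrepo(somerepo)
#     ...
#     repo, created = cache.fetch()  # reuses the old instance when the
#                                    # files-of-interest snapshot is unchanged
#     if created:
#         pass  # any caches derived from the old instance must be rebuilt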