clone: use utility function to write hgrc
Yuya Nishihara
r35638:545967ec default
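The change replaces the hand-rolled file handling used to record the clone source in the new repository's .hg/hgrc with the existing vfs write helper. A minimal sketch of the before/after pattern, using only names (destrepo, template, defaulturl, util) exactly as they appear in clone() in the hunk below:

    # before: open .hg/hgrc through the vfs and manage the file handle by hand
    fp = destrepo.vfs("hgrc", "wb")
    fp.write(util.tonativeeol(template % defaulturl))
    fp.close()

    # after: vfs.write() opens, writes and closes the file in one call
    destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))

The written content is unchanged; the utility only removes the open/write/close boilerplate.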
@@ -1,1112 +1,1109 @@
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 bundlerepo,
21 bundlerepo,
22 cmdutil,
22 cmdutil,
23 destutil,
23 destutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 httppeer,
28 httppeer,
29 localrepo,
29 localrepo,
30 lock,
30 lock,
31 logexchange,
31 logexchange,
32 merge as mergemod,
32 merge as mergemod,
33 node,
33 node,
34 phases,
34 phases,
35 repoview,
35 repoview,
36 scmutil,
36 scmutil,
37 sshpeer,
37 sshpeer,
38 statichttprepo,
38 statichttprepo,
39 ui as uimod,
39 ui as uimod,
40 unionrepo,
40 unionrepo,
41 url,
41 url,
42 util,
42 util,
43 verify as verifymod,
43 verify as verifymod,
44 vfs as vfsmod,
44 vfs as vfsmod,
45 )
45 )
46
46
47 release = lock.release
47 release = lock.release
48
48
49 # shared features
49 # shared features
50 sharedbookmarks = 'bookmarks'
50 sharedbookmarks = 'bookmarks'
51
51
52 def _local(path):
52 def _local(path):
53 path = util.expandpath(util.urllocalpath(path))
53 path = util.expandpath(util.urllocalpath(path))
54 return (os.path.isfile(path) and bundlerepo or localrepo)
54 return (os.path.isfile(path) and bundlerepo or localrepo)
55
55
56 def addbranchrevs(lrepo, other, branches, revs):
56 def addbranchrevs(lrepo, other, branches, revs):
57 peer = other.peer() # a courtesy to callers using a localrepo for other
57 peer = other.peer() # a courtesy to callers using a localrepo for other
58 hashbranch, branches = branches
58 hashbranch, branches = branches
59 if not hashbranch and not branches:
59 if not hashbranch and not branches:
60 x = revs or None
60 x = revs or None
61 if util.safehasattr(revs, 'first'):
61 if util.safehasattr(revs, 'first'):
62 y = revs.first()
62 y = revs.first()
63 elif revs:
63 elif revs:
64 y = revs[0]
64 y = revs[0]
65 else:
65 else:
66 y = None
66 y = None
67 return x, y
67 return x, y
68 if revs:
68 if revs:
69 revs = list(revs)
69 revs = list(revs)
70 else:
70 else:
71 revs = []
71 revs = []
72
72
73 if not peer.capable('branchmap'):
73 if not peer.capable('branchmap'):
74 if branches:
74 if branches:
75 raise error.Abort(_("remote branch lookup not supported"))
75 raise error.Abort(_("remote branch lookup not supported"))
76 revs.append(hashbranch)
76 revs.append(hashbranch)
77 return revs, revs[0]
77 return revs, revs[0]
78 branchmap = peer.branchmap()
78 branchmap = peer.branchmap()
79
79
80 def primary(branch):
80 def primary(branch):
81 if branch == '.':
81 if branch == '.':
82 if not lrepo:
82 if not lrepo:
83 raise error.Abort(_("dirstate branch not accessible"))
83 raise error.Abort(_("dirstate branch not accessible"))
84 branch = lrepo.dirstate.branch()
84 branch = lrepo.dirstate.branch()
85 if branch in branchmap:
85 if branch in branchmap:
86 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
86 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
87 return True
87 return True
88 else:
88 else:
89 return False
89 return False
90
90
91 for branch in branches:
91 for branch in branches:
92 if not primary(branch):
92 if not primary(branch):
93 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
93 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
94 if hashbranch:
94 if hashbranch:
95 if not primary(hashbranch):
95 if not primary(hashbranch):
96 revs.append(hashbranch)
96 revs.append(hashbranch)
97 return revs, revs[0]
97 return revs, revs[0]
98
98
99 def parseurl(path, branches=None):
99 def parseurl(path, branches=None):
100 '''parse url#branch, returning (url, (branch, branches))'''
100 '''parse url#branch, returning (url, (branch, branches))'''
101
101
102 u = util.url(path)
102 u = util.url(path)
103 branch = None
103 branch = None
104 if u.fragment:
104 if u.fragment:
105 branch = u.fragment
105 branch = u.fragment
106 u.fragment = None
106 u.fragment = None
107 return bytes(u), (branch, branches or [])
107 return bytes(u), (branch, branches or [])
108
108
109 schemes = {
109 schemes = {
110 'bundle': bundlerepo,
110 'bundle': bundlerepo,
111 'union': unionrepo,
111 'union': unionrepo,
112 'file': _local,
112 'file': _local,
113 'http': httppeer,
113 'http': httppeer,
114 'https': httppeer,
114 'https': httppeer,
115 'ssh': sshpeer,
115 'ssh': sshpeer,
116 'static-http': statichttprepo,
116 'static-http': statichttprepo,
117 }
117 }
118
118
119 def _peerlookup(path):
119 def _peerlookup(path):
120 u = util.url(path)
120 u = util.url(path)
121 scheme = u.scheme or 'file'
121 scheme = u.scheme or 'file'
122 thing = schemes.get(scheme) or schemes['file']
122 thing = schemes.get(scheme) or schemes['file']
123 try:
123 try:
124 return thing(path)
124 return thing(path)
125 except TypeError:
125 except TypeError:
126 # we can't test callable(thing) because 'thing' can be an unloaded
126 # we can't test callable(thing) because 'thing' can be an unloaded
127 # module that implements __call__
127 # module that implements __call__
128 if not util.safehasattr(thing, 'instance'):
128 if not util.safehasattr(thing, 'instance'):
129 raise
129 raise
130 return thing
130 return thing
131
131
132 def islocal(repo):
132 def islocal(repo):
133 '''return true if repo (or path pointing to repo) is local'''
133 '''return true if repo (or path pointing to repo) is local'''
134 if isinstance(repo, bytes):
134 if isinstance(repo, bytes):
135 try:
135 try:
136 return _peerlookup(repo).islocal(repo)
136 return _peerlookup(repo).islocal(repo)
137 except AttributeError:
137 except AttributeError:
138 return False
138 return False
139 return repo.local()
139 return repo.local()
140
140
141 def openpath(ui, path):
141 def openpath(ui, path):
142 '''open path with open if local, url.open if remote'''
142 '''open path with open if local, url.open if remote'''
143 pathurl = util.url(path, parsequery=False, parsefragment=False)
143 pathurl = util.url(path, parsequery=False, parsefragment=False)
144 if pathurl.islocal():
144 if pathurl.islocal():
145 return util.posixfile(pathurl.localpath(), 'rb')
145 return util.posixfile(pathurl.localpath(), 'rb')
146 else:
146 else:
147 return url.open(ui, path)
147 return url.open(ui, path)
148
148
149 # a list of (ui, repo) functions called for wire peer initialization
149 # a list of (ui, repo) functions called for wire peer initialization
150 wirepeersetupfuncs = []
150 wirepeersetupfuncs = []
151
151
152 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
152 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
153 """return a repository object for the specified path"""
153 """return a repository object for the specified path"""
154 obj = _peerlookup(path).instance(ui, path, create)
154 obj = _peerlookup(path).instance(ui, path, create)
155 ui = getattr(obj, "ui", ui)
155 ui = getattr(obj, "ui", ui)
156 for f in presetupfuncs or []:
156 for f in presetupfuncs or []:
157 f(ui, obj)
157 f(ui, obj)
158 for name, module in extensions.extensions(ui):
158 for name, module in extensions.extensions(ui):
159 hook = getattr(module, 'reposetup', None)
159 hook = getattr(module, 'reposetup', None)
160 if hook:
160 if hook:
161 hook(ui, obj)
161 hook(ui, obj)
162 if not obj.local():
162 if not obj.local():
163 for f in wirepeersetupfuncs:
163 for f in wirepeersetupfuncs:
164 f(ui, obj)
164 f(ui, obj)
165 return obj
165 return obj
166
166
167 def repository(ui, path='', create=False, presetupfuncs=None):
167 def repository(ui, path='', create=False, presetupfuncs=None):
168 """return a repository object for the specified path"""
168 """return a repository object for the specified path"""
169 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
169 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
170 repo = peer.local()
170 repo = peer.local()
171 if not repo:
171 if not repo:
172 raise error.Abort(_("repository '%s' is not local") %
172 raise error.Abort(_("repository '%s' is not local") %
173 (path or peer.url()))
173 (path or peer.url()))
174 return repo.filtered('visible')
174 return repo.filtered('visible')
175
175
176 def peer(uiorrepo, opts, path, create=False):
176 def peer(uiorrepo, opts, path, create=False):
177 '''return a repository peer for the specified path'''
177 '''return a repository peer for the specified path'''
178 rui = remoteui(uiorrepo, opts)
178 rui = remoteui(uiorrepo, opts)
179 return _peerorrepo(rui, path, create).peer()
179 return _peerorrepo(rui, path, create).peer()
180
180
181 def defaultdest(source):
181 def defaultdest(source):
182 '''return default destination of clone if none is given
182 '''return default destination of clone if none is given
183
183
184 >>> defaultdest(b'foo')
184 >>> defaultdest(b'foo')
185 'foo'
185 'foo'
186 >>> defaultdest(b'/foo/bar')
186 >>> defaultdest(b'/foo/bar')
187 'bar'
187 'bar'
188 >>> defaultdest(b'/')
188 >>> defaultdest(b'/')
189 ''
189 ''
190 >>> defaultdest(b'')
190 >>> defaultdest(b'')
191 ''
191 ''
192 >>> defaultdest(b'http://example.org/')
192 >>> defaultdest(b'http://example.org/')
193 ''
193 ''
194 >>> defaultdest(b'http://example.org/foo/')
194 >>> defaultdest(b'http://example.org/foo/')
195 'foo'
195 'foo'
196 '''
196 '''
197 path = util.url(source).path
197 path = util.url(source).path
198 if not path:
198 if not path:
199 return ''
199 return ''
200 return os.path.basename(os.path.normpath(path))
200 return os.path.basename(os.path.normpath(path))
201
201
202 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
202 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
203 relative=False):
203 relative=False):
204 '''create a shared repository'''
204 '''create a shared repository'''
205
205
206 if not islocal(source):
206 if not islocal(source):
207 raise error.Abort(_('can only share local repositories'))
207 raise error.Abort(_('can only share local repositories'))
208
208
209 if not dest:
209 if not dest:
210 dest = defaultdest(source)
210 dest = defaultdest(source)
211 else:
211 else:
212 dest = ui.expandpath(dest)
212 dest = ui.expandpath(dest)
213
213
214 if isinstance(source, str):
214 if isinstance(source, str):
215 origsource = ui.expandpath(source)
215 origsource = ui.expandpath(source)
216 source, branches = parseurl(origsource)
216 source, branches = parseurl(origsource)
217 srcrepo = repository(ui, source)
217 srcrepo = repository(ui, source)
218 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
218 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
219 else:
219 else:
220 srcrepo = source.local()
220 srcrepo = source.local()
221 origsource = source = srcrepo.url()
221 origsource = source = srcrepo.url()
222 checkout = None
222 checkout = None
223
223
224 sharedpath = srcrepo.sharedpath # if our source is already sharing
224 sharedpath = srcrepo.sharedpath # if our source is already sharing
225
225
226 destwvfs = vfsmod.vfs(dest, realpath=True)
226 destwvfs = vfsmod.vfs(dest, realpath=True)
227 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
227 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
228
228
229 if destvfs.lexists():
229 if destvfs.lexists():
230 raise error.Abort(_('destination already exists'))
230 raise error.Abort(_('destination already exists'))
231
231
232 if not destwvfs.isdir():
232 if not destwvfs.isdir():
233 destwvfs.mkdir()
233 destwvfs.mkdir()
234 destvfs.makedir()
234 destvfs.makedir()
235
235
236 requirements = ''
236 requirements = ''
237 try:
237 try:
238 requirements = srcrepo.vfs.read('requires')
238 requirements = srcrepo.vfs.read('requires')
239 except IOError as inst:
239 except IOError as inst:
240 if inst.errno != errno.ENOENT:
240 if inst.errno != errno.ENOENT:
241 raise
241 raise
242
242
243 if relative:
243 if relative:
244 try:
244 try:
245 sharedpath = os.path.relpath(sharedpath, destvfs.base)
245 sharedpath = os.path.relpath(sharedpath, destvfs.base)
246 requirements += 'relshared\n'
246 requirements += 'relshared\n'
247 except (IOError, ValueError) as e:
247 except (IOError, ValueError) as e:
248 # ValueError is raised on Windows if the drive letters differ on
248 # ValueError is raised on Windows if the drive letters differ on
249 # each path
249 # each path
250 raise error.Abort(_('cannot calculate relative path'),
250 raise error.Abort(_('cannot calculate relative path'),
251 hint=str(e))
251 hint=str(e))
252 else:
252 else:
253 requirements += 'shared\n'
253 requirements += 'shared\n'
254
254
255 destvfs.write('requires', requirements)
255 destvfs.write('requires', requirements)
256 destvfs.write('sharedpath', sharedpath)
256 destvfs.write('sharedpath', sharedpath)
257
257
258 r = repository(ui, destwvfs.base)
258 r = repository(ui, destwvfs.base)
259 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
259 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
260 _postshareupdate(r, update, checkout=checkout)
260 _postshareupdate(r, update, checkout=checkout)
261 return r
261 return r
262
262
263 def unshare(ui, repo):
263 def unshare(ui, repo):
264 """convert a shared repository to a normal one
264 """convert a shared repository to a normal one
265
265
266 Copy the store data to the repo and remove the sharedpath data.
266 Copy the store data to the repo and remove the sharedpath data.
267 """
267 """
268
268
269 destlock = lock = None
269 destlock = lock = None
270 lock = repo.lock()
270 lock = repo.lock()
271 try:
271 try:
272 # we use locks here because if we race with commit, we
272 # we use locks here because if we race with commit, we
273 # can end up with extra data in the cloned revlogs that's
273 # can end up with extra data in the cloned revlogs that's
274 # not pointed to by changesets, thus causing verify to
274 # not pointed to by changesets, thus causing verify to
275 # fail
275 # fail
276
276
277 destlock = copystore(ui, repo, repo.path)
277 destlock = copystore(ui, repo, repo.path)
278
278
279 sharefile = repo.vfs.join('sharedpath')
279 sharefile = repo.vfs.join('sharedpath')
280 util.rename(sharefile, sharefile + '.old')
280 util.rename(sharefile, sharefile + '.old')
281
281
282 repo.requirements.discard('shared')
282 repo.requirements.discard('shared')
283 repo.requirements.discard('relshared')
283 repo.requirements.discard('relshared')
284 repo._writerequirements()
284 repo._writerequirements()
285 finally:
285 finally:
286 destlock and destlock.release()
286 destlock and destlock.release()
287 lock and lock.release()
287 lock and lock.release()
288
288
289 # update store, spath, svfs and sjoin of repo
289 # update store, spath, svfs and sjoin of repo
290 repo.unfiltered().__init__(repo.baseui, repo.root)
290 repo.unfiltered().__init__(repo.baseui, repo.root)
291
291
292 # TODO: figure out how to access subrepos that exist, but were previously
292 # TODO: figure out how to access subrepos that exist, but were previously
293 # removed from .hgsub
293 # removed from .hgsub
294 c = repo['.']
294 c = repo['.']
295 subs = c.substate
295 subs = c.substate
296 for s in sorted(subs):
296 for s in sorted(subs):
297 c.sub(s).unshare()
297 c.sub(s).unshare()
298
298
299 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
299 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
300 """Called after a new shared repo is created.
300 """Called after a new shared repo is created.
301
301
302 The new repo only has a requirements file and pointer to the source.
302 The new repo only has a requirements file and pointer to the source.
303 This function configures additional shared data.
303 This function configures additional shared data.
304
304
305 Extensions can wrap this function and write additional entries to
305 Extensions can wrap this function and write additional entries to
306 destrepo/.hg/shared to indicate additional pieces of data to be shared.
306 destrepo/.hg/shared to indicate additional pieces of data to be shared.
307 """
307 """
308 default = defaultpath or sourcerepo.ui.config('paths', 'default')
308 default = defaultpath or sourcerepo.ui.config('paths', 'default')
309 if default:
309 if default:
310 with destrepo.vfs("hgrc", "w", text=True) as fp:
310 with destrepo.vfs("hgrc", "w", text=True) as fp:
311 fp.write("[paths]\n")
311 fp.write("[paths]\n")
312 fp.write("default = %s\n" % default)
312 fp.write("default = %s\n" % default)
313
313
314 with destrepo.wlock():
314 with destrepo.wlock():
315 if bookmarks:
315 if bookmarks:
316 destrepo.vfs.write('shared', sharedbookmarks + '\n')
316 destrepo.vfs.write('shared', sharedbookmarks + '\n')
317
317
318 def _postshareupdate(repo, update, checkout=None):
318 def _postshareupdate(repo, update, checkout=None):
319 """Maybe perform a working directory update after a shared repo is created.
319 """Maybe perform a working directory update after a shared repo is created.
320
320
321 ``update`` can be a boolean or a revision to update to.
321 ``update`` can be a boolean or a revision to update to.
322 """
322 """
323 if not update:
323 if not update:
324 return
324 return
325
325
326 repo.ui.status(_("updating working directory\n"))
326 repo.ui.status(_("updating working directory\n"))
327 if update is not True:
327 if update is not True:
328 checkout = update
328 checkout = update
329 for test in (checkout, 'default', 'tip'):
329 for test in (checkout, 'default', 'tip'):
330 if test is None:
330 if test is None:
331 continue
331 continue
332 try:
332 try:
333 uprev = repo.lookup(test)
333 uprev = repo.lookup(test)
334 break
334 break
335 except error.RepoLookupError:
335 except error.RepoLookupError:
336 continue
336 continue
337 _update(repo, uprev)
337 _update(repo, uprev)
338
338
339 def copystore(ui, srcrepo, destpath):
339 def copystore(ui, srcrepo, destpath):
340 '''copy files from store of srcrepo in destpath
340 '''copy files from store of srcrepo in destpath
341
341
342 returns destlock
342 returns destlock
343 '''
343 '''
344 destlock = None
344 destlock = None
345 try:
345 try:
346 hardlink = None
346 hardlink = None
347 num = 0
347 num = 0
348 closetopic = [None]
348 closetopic = [None]
349 def prog(topic, pos):
349 def prog(topic, pos):
350 if pos is None:
350 if pos is None:
351 closetopic[0] = topic
351 closetopic[0] = topic
352 else:
352 else:
353 ui.progress(topic, pos + num)
353 ui.progress(topic, pos + num)
354 srcpublishing = srcrepo.publishing()
354 srcpublishing = srcrepo.publishing()
355 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
355 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
356 dstvfs = vfsmod.vfs(destpath)
356 dstvfs = vfsmod.vfs(destpath)
357 for f in srcrepo.store.copylist():
357 for f in srcrepo.store.copylist():
358 if srcpublishing and f.endswith('phaseroots'):
358 if srcpublishing and f.endswith('phaseroots'):
359 continue
359 continue
360 dstbase = os.path.dirname(f)
360 dstbase = os.path.dirname(f)
361 if dstbase and not dstvfs.exists(dstbase):
361 if dstbase and not dstvfs.exists(dstbase):
362 dstvfs.mkdir(dstbase)
362 dstvfs.mkdir(dstbase)
363 if srcvfs.exists(f):
363 if srcvfs.exists(f):
364 if f.endswith('data'):
364 if f.endswith('data'):
365 # 'dstbase' may be empty (e.g. revlog format 0)
365 # 'dstbase' may be empty (e.g. revlog format 0)
366 lockfile = os.path.join(dstbase, "lock")
366 lockfile = os.path.join(dstbase, "lock")
367 # lock to avoid premature writing to the target
367 # lock to avoid premature writing to the target
368 destlock = lock.lock(dstvfs, lockfile)
368 destlock = lock.lock(dstvfs, lockfile)
369 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
369 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
370 hardlink, progress=prog)
370 hardlink, progress=prog)
371 num += n
371 num += n
372 if hardlink:
372 if hardlink:
373 ui.debug("linked %d files\n" % num)
373 ui.debug("linked %d files\n" % num)
374 if closetopic[0]:
374 if closetopic[0]:
375 ui.progress(closetopic[0], None)
375 ui.progress(closetopic[0], None)
376 else:
376 else:
377 ui.debug("copied %d files\n" % num)
377 ui.debug("copied %d files\n" % num)
378 if closetopic[0]:
378 if closetopic[0]:
379 ui.progress(closetopic[0], None)
379 ui.progress(closetopic[0], None)
380 return destlock
380 return destlock
381 except: # re-raises
381 except: # re-raises
382 release(destlock)
382 release(destlock)
383 raise
383 raise
384
384
385 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
385 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
386 rev=None, update=True, stream=False):
386 rev=None, update=True, stream=False):
387 """Perform a clone using a shared repo.
387 """Perform a clone using a shared repo.
388
388
389 The store for the repository will be located at <sharepath>/.hg. The
389 The store for the repository will be located at <sharepath>/.hg. The
390 specified revisions will be cloned or pulled from "source". A shared repo
390 specified revisions will be cloned or pulled from "source". A shared repo
391 will be created at "dest" and a working copy will be created if "update" is
391 will be created at "dest" and a working copy will be created if "update" is
392 True.
392 True.
393 """
393 """
394 revs = None
394 revs = None
395 if rev:
395 if rev:
396 if not srcpeer.capable('lookup'):
396 if not srcpeer.capable('lookup'):
397 raise error.Abort(_("src repository does not support "
397 raise error.Abort(_("src repository does not support "
398 "revision lookup and so doesn't "
398 "revision lookup and so doesn't "
399 "support clone by revision"))
399 "support clone by revision"))
400 revs = [srcpeer.lookup(r) for r in rev]
400 revs = [srcpeer.lookup(r) for r in rev]
401
401
402 # Obtain a lock before checking for or cloning the pooled repo otherwise
402 # Obtain a lock before checking for or cloning the pooled repo otherwise
403 # 2 clients may race creating or populating it.
403 # 2 clients may race creating or populating it.
404 pooldir = os.path.dirname(sharepath)
404 pooldir = os.path.dirname(sharepath)
405 # lock class requires the directory to exist.
405 # lock class requires the directory to exist.
406 try:
406 try:
407 util.makedir(pooldir, False)
407 util.makedir(pooldir, False)
408 except OSError as e:
408 except OSError as e:
409 if e.errno != errno.EEXIST:
409 if e.errno != errno.EEXIST:
410 raise
410 raise
411
411
412 poolvfs = vfsmod.vfs(pooldir)
412 poolvfs = vfsmod.vfs(pooldir)
413 basename = os.path.basename(sharepath)
413 basename = os.path.basename(sharepath)
414
414
415 with lock.lock(poolvfs, '%s.lock' % basename):
415 with lock.lock(poolvfs, '%s.lock' % basename):
416 if os.path.exists(sharepath):
416 if os.path.exists(sharepath):
417 ui.status(_('(sharing from existing pooled repository %s)\n') %
417 ui.status(_('(sharing from existing pooled repository %s)\n') %
418 basename)
418 basename)
419 else:
419 else:
420 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
420 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
421 # Always use pull mode because hardlinks in share mode don't work
421 # Always use pull mode because hardlinks in share mode don't work
422 # well. Never update because working copies aren't necessary in
422 # well. Never update because working copies aren't necessary in
423 # share mode.
423 # share mode.
424 clone(ui, peeropts, source, dest=sharepath, pull=True,
424 clone(ui, peeropts, source, dest=sharepath, pull=True,
425 rev=rev, update=False, stream=stream)
425 rev=rev, update=False, stream=stream)
426
426
427 # Resolve the value to put in [paths] section for the source.
427 # Resolve the value to put in [paths] section for the source.
428 if islocal(source):
428 if islocal(source):
429 defaultpath = os.path.abspath(util.urllocalpath(source))
429 defaultpath = os.path.abspath(util.urllocalpath(source))
430 else:
430 else:
431 defaultpath = source
431 defaultpath = source
432
432
433 sharerepo = repository(ui, path=sharepath)
433 sharerepo = repository(ui, path=sharepath)
434 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
434 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
435 defaultpath=defaultpath)
435 defaultpath=defaultpath)
436
436
437 # We need to perform a pull against the dest repo to fetch bookmarks
437 # We need to perform a pull against the dest repo to fetch bookmarks
438 # and other non-store data that isn't shared by default. In the case of
438 # and other non-store data that isn't shared by default. In the case of
439 # non-existing shared repo, this means we pull from the remote twice. This
439 # non-existing shared repo, this means we pull from the remote twice. This
440 # is a bit weird. But at the time it was implemented, there wasn't an easy
440 # is a bit weird. But at the time it was implemented, there wasn't an easy
441 # way to pull just non-changegroup data.
441 # way to pull just non-changegroup data.
442 destrepo = repository(ui, path=dest)
442 destrepo = repository(ui, path=dest)
443 exchange.pull(destrepo, srcpeer, heads=revs)
443 exchange.pull(destrepo, srcpeer, heads=revs)
444
444
445 _postshareupdate(destrepo, update)
445 _postshareupdate(destrepo, update)
446
446
447 return srcpeer, peer(ui, peeropts, dest)
447 return srcpeer, peer(ui, peeropts, dest)
448
448
449 # Recomputing branch cache might be slow on big repos,
449 # Recomputing branch cache might be slow on big repos,
450 # so just copy it
450 # so just copy it
451 def _copycache(srcrepo, dstcachedir, fname):
451 def _copycache(srcrepo, dstcachedir, fname):
452 """copy a cache from srcrepo to destcachedir (if it exists)"""
452 """copy a cache from srcrepo to destcachedir (if it exists)"""
453 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
453 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
454 dstbranchcache = os.path.join(dstcachedir, fname)
454 dstbranchcache = os.path.join(dstcachedir, fname)
455 if os.path.exists(srcbranchcache):
455 if os.path.exists(srcbranchcache):
456 if not os.path.exists(dstcachedir):
456 if not os.path.exists(dstcachedir):
457 os.mkdir(dstcachedir)
457 os.mkdir(dstcachedir)
458 util.copyfile(srcbranchcache, dstbranchcache)
458 util.copyfile(srcbranchcache, dstbranchcache)
459
459
460 def _cachetocopy(srcrepo):
460 def _cachetocopy(srcrepo):
461 """return the list of cache file valuable to copy during a clone"""
461 """return the list of cache file valuable to copy during a clone"""
462 # In local clones we're copying all nodes, not just served
462 # In local clones we're copying all nodes, not just served
463 # ones. Therefore copy all branch caches over.
463 # ones. Therefore copy all branch caches over.
464 cachefiles = ['branch2']
464 cachefiles = ['branch2']
465 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
465 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
466 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
466 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
467 cachefiles += ['tags2']
467 cachefiles += ['tags2']
468 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
468 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
469 cachefiles += ['hgtagsfnodes1']
469 cachefiles += ['hgtagsfnodes1']
470 return cachefiles
470 return cachefiles
471
471
472 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
472 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
473 update=True, stream=False, branch=None, shareopts=None):
473 update=True, stream=False, branch=None, shareopts=None):
474 """Make a copy of an existing repository.
474 """Make a copy of an existing repository.
475
475
476 Create a copy of an existing repository in a new directory. The
476 Create a copy of an existing repository in a new directory. The
477 source and destination are URLs, as passed to the repository
477 source and destination are URLs, as passed to the repository
478 function. Returns a pair of repository peers, the source and
478 function. Returns a pair of repository peers, the source and
479 newly created destination.
479 newly created destination.
480
480
481 The location of the source is added to the new repository's
481 The location of the source is added to the new repository's
482 .hg/hgrc file, as the default to be used for future pulls and
482 .hg/hgrc file, as the default to be used for future pulls and
483 pushes.
483 pushes.
484
484
485 If an exception is raised, the partly cloned/updated destination
485 If an exception is raised, the partly cloned/updated destination
486 repository will be deleted.
486 repository will be deleted.
487
487
488 Arguments:
488 Arguments:
489
489
490 source: repository object or URL
490 source: repository object or URL
491
491
492 dest: URL of destination repository to create (defaults to base
492 dest: URL of destination repository to create (defaults to base
493 name of source repository)
493 name of source repository)
494
494
495 pull: always pull from source repository, even in local case or if the
495 pull: always pull from source repository, even in local case or if the
496 server prefers streaming
496 server prefers streaming
497
497
498 stream: stream raw data uncompressed from repository (fast over
498 stream: stream raw data uncompressed from repository (fast over
499 LAN, slow over WAN)
499 LAN, slow over WAN)
500
500
501 rev: revision to clone up to (implies pull=True)
501 rev: revision to clone up to (implies pull=True)
502
502
503 update: update working directory after clone completes, if
503 update: update working directory after clone completes, if
504 destination is local repository (True means update to default rev,
504 destination is local repository (True means update to default rev,
505 anything else is treated as a revision)
505 anything else is treated as a revision)
506
506
507 branch: branches to clone
507 branch: branches to clone
508
508
509 shareopts: dict of options to control auto sharing behavior. The "pool" key
509 shareopts: dict of options to control auto sharing behavior. The "pool" key
510 activates auto sharing mode and defines the directory for stores. The
510 activates auto sharing mode and defines the directory for stores. The
511 "mode" key determines how to construct the directory name of the shared
511 "mode" key determines how to construct the directory name of the shared
512 repository. "identity" means the name is derived from the node of the first
512 repository. "identity" means the name is derived from the node of the first
513 changeset in the repository. "remote" means the name is derived from the
513 changeset in the repository. "remote" means the name is derived from the
514 remote's path/URL. Defaults to "identity."
514 remote's path/URL. Defaults to "identity."
515 """
515 """
516
516
517 if isinstance(source, bytes):
517 if isinstance(source, bytes):
518 origsource = ui.expandpath(source)
518 origsource = ui.expandpath(source)
519 source, branch = parseurl(origsource, branch)
519 source, branch = parseurl(origsource, branch)
520 srcpeer = peer(ui, peeropts, source)
520 srcpeer = peer(ui, peeropts, source)
521 else:
521 else:
522 srcpeer = source.peer() # in case we were called with a localrepo
522 srcpeer = source.peer() # in case we were called with a localrepo
523 branch = (None, branch or [])
523 branch = (None, branch or [])
524 origsource = source = srcpeer.url()
524 origsource = source = srcpeer.url()
525 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
525 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
526
526
527 if dest is None:
527 if dest is None:
528 dest = defaultdest(source)
528 dest = defaultdest(source)
529 if dest:
529 if dest:
530 ui.status(_("destination directory: %s\n") % dest)
530 ui.status(_("destination directory: %s\n") % dest)
531 else:
531 else:
532 dest = ui.expandpath(dest)
532 dest = ui.expandpath(dest)
533
533
534 dest = util.urllocalpath(dest)
534 dest = util.urllocalpath(dest)
535 source = util.urllocalpath(source)
535 source = util.urllocalpath(source)
536
536
537 if not dest:
537 if not dest:
538 raise error.Abort(_("empty destination path is not valid"))
538 raise error.Abort(_("empty destination path is not valid"))
539
539
540 destvfs = vfsmod.vfs(dest, expandpath=True)
540 destvfs = vfsmod.vfs(dest, expandpath=True)
541 if destvfs.lexists():
541 if destvfs.lexists():
542 if not destvfs.isdir():
542 if not destvfs.isdir():
543 raise error.Abort(_("destination '%s' already exists") % dest)
543 raise error.Abort(_("destination '%s' already exists") % dest)
544 elif destvfs.listdir():
544 elif destvfs.listdir():
545 raise error.Abort(_("destination '%s' is not empty") % dest)
545 raise error.Abort(_("destination '%s' is not empty") % dest)
546
546
547 shareopts = shareopts or {}
547 shareopts = shareopts or {}
548 sharepool = shareopts.get('pool')
548 sharepool = shareopts.get('pool')
549 sharenamemode = shareopts.get('mode')
549 sharenamemode = shareopts.get('mode')
550 if sharepool and islocal(dest):
550 if sharepool and islocal(dest):
551 sharepath = None
551 sharepath = None
552 if sharenamemode == 'identity':
552 if sharenamemode == 'identity':
553 # Resolve the name from the initial changeset in the remote
553 # Resolve the name from the initial changeset in the remote
554 # repository. This returns nullid when the remote is empty. It
554 # repository. This returns nullid when the remote is empty. It
555 # raises RepoLookupError if revision 0 is filtered or otherwise
555 # raises RepoLookupError if revision 0 is filtered or otherwise
556 # not available. If we fail to resolve, sharing is not enabled.
556 # not available. If we fail to resolve, sharing is not enabled.
557 try:
557 try:
558 rootnode = srcpeer.lookup('0')
558 rootnode = srcpeer.lookup('0')
559 if rootnode != node.nullid:
559 if rootnode != node.nullid:
560 sharepath = os.path.join(sharepool, node.hex(rootnode))
560 sharepath = os.path.join(sharepool, node.hex(rootnode))
561 else:
561 else:
562 ui.status(_('(not using pooled storage: '
562 ui.status(_('(not using pooled storage: '
563 'remote appears to be empty)\n'))
563 'remote appears to be empty)\n'))
564 except error.RepoLookupError:
564 except error.RepoLookupError:
565 ui.status(_('(not using pooled storage: '
565 ui.status(_('(not using pooled storage: '
566 'unable to resolve identity of remote)\n'))
566 'unable to resolve identity of remote)\n'))
567 elif sharenamemode == 'remote':
567 elif sharenamemode == 'remote':
568 sharepath = os.path.join(
568 sharepath = os.path.join(
569 sharepool, node.hex(hashlib.sha1(source).digest()))
569 sharepool, node.hex(hashlib.sha1(source).digest()))
570 else:
570 else:
571 raise error.Abort(_('unknown share naming mode: %s') %
571 raise error.Abort(_('unknown share naming mode: %s') %
572 sharenamemode)
572 sharenamemode)
573
573
574 if sharepath:
574 if sharepath:
575 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
575 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
576 dest, pull=pull, rev=rev, update=update,
576 dest, pull=pull, rev=rev, update=update,
577 stream=stream)
577 stream=stream)
578
578
579 srclock = destlock = cleandir = None
579 srclock = destlock = cleandir = None
580 srcrepo = srcpeer.local()
580 srcrepo = srcpeer.local()
581 try:
581 try:
582 abspath = origsource
582 abspath = origsource
583 if islocal(origsource):
583 if islocal(origsource):
584 abspath = os.path.abspath(util.urllocalpath(origsource))
584 abspath = os.path.abspath(util.urllocalpath(origsource))
585
585
586 if islocal(dest):
586 if islocal(dest):
587 cleandir = dest
587 cleandir = dest
588
588
589 copy = False
589 copy = False
590 if (srcrepo and srcrepo.cancopy() and islocal(dest)
590 if (srcrepo and srcrepo.cancopy() and islocal(dest)
591 and not phases.hassecret(srcrepo)):
591 and not phases.hassecret(srcrepo)):
592 copy = not pull and not rev
592 copy = not pull and not rev
593
593
594 if copy:
594 if copy:
595 try:
595 try:
596 # we use a lock here because if we race with commit, we
596 # we use a lock here because if we race with commit, we
597 # can end up with extra data in the cloned revlogs that's
597 # can end up with extra data in the cloned revlogs that's
598 # not pointed to by changesets, thus causing verify to
598 # not pointed to by changesets, thus causing verify to
599 # fail
599 # fail
600 srclock = srcrepo.lock(wait=False)
600 srclock = srcrepo.lock(wait=False)
601 except error.LockError:
601 except error.LockError:
602 copy = False
602 copy = False
603
603
604 if copy:
604 if copy:
605 srcrepo.hook('preoutgoing', throw=True, source='clone')
605 srcrepo.hook('preoutgoing', throw=True, source='clone')
606 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
606 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
607 if not os.path.exists(dest):
607 if not os.path.exists(dest):
608 os.mkdir(dest)
608 os.mkdir(dest)
609 else:
609 else:
610 # only clean up directories we create ourselves
610 # only clean up directories we create ourselves
611 cleandir = hgdir
611 cleandir = hgdir
612 try:
612 try:
613 destpath = hgdir
613 destpath = hgdir
614 util.makedir(destpath, notindexed=True)
614 util.makedir(destpath, notindexed=True)
615 except OSError as inst:
615 except OSError as inst:
616 if inst.errno == errno.EEXIST:
616 if inst.errno == errno.EEXIST:
617 cleandir = None
617 cleandir = None
618 raise error.Abort(_("destination '%s' already exists")
618 raise error.Abort(_("destination '%s' already exists")
619 % dest)
619 % dest)
620 raise
620 raise
621
621
622 destlock = copystore(ui, srcrepo, destpath)
622 destlock = copystore(ui, srcrepo, destpath)
623 # copy bookmarks over
623 # copy bookmarks over
624 srcbookmarks = srcrepo.vfs.join('bookmarks')
624 srcbookmarks = srcrepo.vfs.join('bookmarks')
625 dstbookmarks = os.path.join(destpath, 'bookmarks')
625 dstbookmarks = os.path.join(destpath, 'bookmarks')
626 if os.path.exists(srcbookmarks):
626 if os.path.exists(srcbookmarks):
627 util.copyfile(srcbookmarks, dstbookmarks)
627 util.copyfile(srcbookmarks, dstbookmarks)
628
628
629 dstcachedir = os.path.join(destpath, 'cache')
629 dstcachedir = os.path.join(destpath, 'cache')
630 for cache in _cachetocopy(srcrepo):
630 for cache in _cachetocopy(srcrepo):
631 _copycache(srcrepo, dstcachedir, cache)
631 _copycache(srcrepo, dstcachedir, cache)
632
632
633 # we need to re-init the repo after manually copying the data
633 # we need to re-init the repo after manually copying the data
634 # into it
634 # into it
635 destpeer = peer(srcrepo, peeropts, dest)
635 destpeer = peer(srcrepo, peeropts, dest)
636 srcrepo.hook('outgoing', source='clone',
636 srcrepo.hook('outgoing', source='clone',
637 node=node.hex(node.nullid))
637 node=node.hex(node.nullid))
638 else:
638 else:
639 try:
639 try:
640 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
640 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
641 # only pass ui when no srcrepo
641 # only pass ui when no srcrepo
642 except OSError as inst:
642 except OSError as inst:
643 if inst.errno == errno.EEXIST:
643 if inst.errno == errno.EEXIST:
644 cleandir = None
644 cleandir = None
645 raise error.Abort(_("destination '%s' already exists")
645 raise error.Abort(_("destination '%s' already exists")
646 % dest)
646 % dest)
647 raise
647 raise
648
648
649 revs = None
649 revs = None
650 if rev:
650 if rev:
651 if not srcpeer.capable('lookup'):
651 if not srcpeer.capable('lookup'):
652 raise error.Abort(_("src repository does not support "
652 raise error.Abort(_("src repository does not support "
653 "revision lookup and so doesn't "
653 "revision lookup and so doesn't "
654 "support clone by revision"))
654 "support clone by revision"))
655 revs = [srcpeer.lookup(r) for r in rev]
655 revs = [srcpeer.lookup(r) for r in rev]
656 checkout = revs[0]
656 checkout = revs[0]
657 local = destpeer.local()
657 local = destpeer.local()
658 if local:
658 if local:
659 u = util.url(abspath)
659 u = util.url(abspath)
660 defaulturl = bytes(u)
660 defaulturl = bytes(u)
661 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
661 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
662 if not stream:
662 if not stream:
663 if pull:
663 if pull:
664 stream = False
664 stream = False
665 else:
665 else:
666 stream = None
666 stream = None
667 # internal config: ui.quietbookmarkmove
667 # internal config: ui.quietbookmarkmove
668 overrides = {('ui', 'quietbookmarkmove'): True}
668 overrides = {('ui', 'quietbookmarkmove'): True}
669 with local.ui.configoverride(overrides, 'clone'):
669 with local.ui.configoverride(overrides, 'clone'):
670 exchange.pull(local, srcpeer, revs,
670 exchange.pull(local, srcpeer, revs,
671 streamclonerequested=stream)
671 streamclonerequested=stream)
672 elif srcrepo:
672 elif srcrepo:
673 exchange.push(srcrepo, destpeer, revs=revs,
673 exchange.push(srcrepo, destpeer, revs=revs,
674 bookmarks=srcrepo._bookmarks.keys())
674 bookmarks=srcrepo._bookmarks.keys())
675 else:
675 else:
676 raise error.Abort(_("clone from remote to remote not supported")
676 raise error.Abort(_("clone from remote to remote not supported")
677 )
677 )
678
678
679 cleandir = None
679 cleandir = None
680
680
681 destrepo = destpeer.local()
681 destrepo = destpeer.local()
682 if destrepo:
682 if destrepo:
683 template = uimod.samplehgrcs['cloned']
683 template = uimod.samplehgrcs['cloned']
684 fp = destrepo.vfs("hgrc", "wb")
685 u = util.url(abspath)
684 u = util.url(abspath)
686 u.passwd = None
685 u.passwd = None
687 defaulturl = bytes(u)
686 defaulturl = bytes(u)
688 fp.write(util.tonativeeol(template % defaulturl))
687 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
689 fp.close()
690
691 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
688 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
692
689
693 if ui.configbool('experimental', 'remotenames'):
690 if ui.configbool('experimental', 'remotenames'):
694 logexchange.pullremotenames(destrepo, srcpeer)
691 logexchange.pullremotenames(destrepo, srcpeer)
695
692
696 if update:
693 if update:
697 if update is not True:
694 if update is not True:
698 checkout = srcpeer.lookup(update)
695 checkout = srcpeer.lookup(update)
699 uprev = None
696 uprev = None
700 status = None
697 status = None
701 if checkout is not None:
698 if checkout is not None:
702 try:
699 try:
703 uprev = destrepo.lookup(checkout)
700 uprev = destrepo.lookup(checkout)
704 except error.RepoLookupError:
701 except error.RepoLookupError:
705 if update is not True:
702 if update is not True:
706 try:
703 try:
707 uprev = destrepo.lookup(update)
704 uprev = destrepo.lookup(update)
708 except error.RepoLookupError:
705 except error.RepoLookupError:
709 pass
706 pass
710 if uprev is None:
707 if uprev is None:
711 try:
708 try:
712 uprev = destrepo._bookmarks['@']
709 uprev = destrepo._bookmarks['@']
713 update = '@'
710 update = '@'
714 bn = destrepo[uprev].branch()
711 bn = destrepo[uprev].branch()
715 if bn == 'default':
712 if bn == 'default':
716 status = _("updating to bookmark @\n")
713 status = _("updating to bookmark @\n")
717 else:
714 else:
718 status = (_("updating to bookmark @ on branch %s\n")
715 status = (_("updating to bookmark @ on branch %s\n")
719 % bn)
716 % bn)
720 except KeyError:
717 except KeyError:
721 try:
718 try:
722 uprev = destrepo.branchtip('default')
719 uprev = destrepo.branchtip('default')
723 except error.RepoLookupError:
720 except error.RepoLookupError:
724 uprev = destrepo.lookup('tip')
721 uprev = destrepo.lookup('tip')
725 if not status:
722 if not status:
726 bn = destrepo[uprev].branch()
723 bn = destrepo[uprev].branch()
727 status = _("updating to branch %s\n") % bn
724 status = _("updating to branch %s\n") % bn
728 destrepo.ui.status(status)
725 destrepo.ui.status(status)
729 _update(destrepo, uprev)
726 _update(destrepo, uprev)
730 if update in destrepo._bookmarks:
727 if update in destrepo._bookmarks:
731 bookmarks.activate(destrepo, update)
728 bookmarks.activate(destrepo, update)
732 finally:
729 finally:
733 release(srclock, destlock)
730 release(srclock, destlock)
734 if cleandir is not None:
731 if cleandir is not None:
735 shutil.rmtree(cleandir, True)
732 shutil.rmtree(cleandir, True)
736 if srcpeer is not None:
733 if srcpeer is not None:
737 srcpeer.close()
734 srcpeer.close()
738 return srcpeer, destpeer
735 return srcpeer, destpeer
739
736
740 def _showstats(repo, stats, quietempty=False):
737 def _showstats(repo, stats, quietempty=False):
741 if quietempty and not any(stats):
738 if quietempty and not any(stats):
742 return
739 return
743 repo.ui.status(_("%d files updated, %d files merged, "
740 repo.ui.status(_("%d files updated, %d files merged, "
744 "%d files removed, %d files unresolved\n") % stats)
741 "%d files removed, %d files unresolved\n") % stats)
745
742
746 def updaterepo(repo, node, overwrite, updatecheck=None):
743 def updaterepo(repo, node, overwrite, updatecheck=None):
747 """Update the working directory to node.
744 """Update the working directory to node.
748
745
749 When overwrite is set, changes are clobbered, merged else
746 When overwrite is set, changes are clobbered, merged else
750
747
751 returns stats (see pydoc mercurial.merge.applyupdates)"""
748 returns stats (see pydoc mercurial.merge.applyupdates)"""
752 return mergemod.update(repo, node, False, overwrite,
749 return mergemod.update(repo, node, False, overwrite,
753 labels=['working copy', 'destination'],
750 labels=['working copy', 'destination'],
754 updatecheck=updatecheck)
751 updatecheck=updatecheck)
755
752
756 def update(repo, node, quietempty=False, updatecheck=None):
753 def update(repo, node, quietempty=False, updatecheck=None):
757 """update the working directory to node"""
754 """update the working directory to node"""
758 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
755 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
759 _showstats(repo, stats, quietempty)
756 _showstats(repo, stats, quietempty)
760 if stats[3]:
757 if stats[3]:
761 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
758 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
762 return stats[3] > 0
759 return stats[3] > 0
763
760
764 # naming conflict in clone()
761 # naming conflict in clone()
765 _update = update
762 _update = update
766
763
767 def clean(repo, node, show_stats=True, quietempty=False):
764 def clean(repo, node, show_stats=True, quietempty=False):
768 """forcibly switch the working directory to node, clobbering changes"""
765 """forcibly switch the working directory to node, clobbering changes"""
769 stats = updaterepo(repo, node, True)
766 stats = updaterepo(repo, node, True)
770 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
767 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
771 if show_stats:
768 if show_stats:
772 _showstats(repo, stats, quietempty)
769 _showstats(repo, stats, quietempty)
773 return stats[3] > 0
770 return stats[3] > 0
774
771
775 # naming conflict in updatetotally()
772 # naming conflict in updatetotally()
776 _clean = clean
773 _clean = clean
777
774
778 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
775 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
779 """Update the working directory with extra care for non-file components
776 """Update the working directory with extra care for non-file components
780
777
781 This takes care of non-file components below:
778 This takes care of non-file components below:
782
779
783 :bookmark: might be advanced or (in)activated
780 :bookmark: might be advanced or (in)activated
784
781
785 This takes arguments below:
782 This takes arguments below:
786
783
787 :checkout: to which revision the working directory is updated
784 :checkout: to which revision the working directory is updated
788 :brev: a name, which might be a bookmark to be activated after updating
785 :brev: a name, which might be a bookmark to be activated after updating
789 :clean: whether changes in the working directory can be discarded
786 :clean: whether changes in the working directory can be discarded
790 :updatecheck: how to deal with a dirty working directory
787 :updatecheck: how to deal with a dirty working directory
791
788
792 Valid values for updatecheck are (None => linear):
789 Valid values for updatecheck are (None => linear):
793
790
794 * abort: abort if the working directory is dirty
791 * abort: abort if the working directory is dirty
795 * none: don't check (merge working directory changes into destination)
792 * none: don't check (merge working directory changes into destination)
796 * linear: check that update is linear before merging working directory
793 * linear: check that update is linear before merging working directory
797 changes into destination
794 changes into destination
798 * noconflict: check that the update does not result in file merges
795 * noconflict: check that the update does not result in file merges
799
796
800 This returns whether conflict is detected at updating or not.
797 This returns whether conflict is detected at updating or not.
801 """
798 """
802 if updatecheck is None:
799 if updatecheck is None:
803 updatecheck = ui.config('commands', 'update.check')
800 updatecheck = ui.config('commands', 'update.check')
804 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
801 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
805 # If not configured, or invalid value configured
802 # If not configured, or invalid value configured
806 updatecheck = 'linear'
803 updatecheck = 'linear'
807 with repo.wlock():
804 with repo.wlock():
808 movemarkfrom = None
805 movemarkfrom = None
809 warndest = False
806 warndest = False
810 if checkout is None:
807 if checkout is None:
811 updata = destutil.destupdate(repo, clean=clean)
808 updata = destutil.destupdate(repo, clean=clean)
812 checkout, movemarkfrom, brev = updata
809 checkout, movemarkfrom, brev = updata
813 warndest = True
810 warndest = True
814
811
815 if clean:
812 if clean:
816 ret = _clean(repo, checkout)
813 ret = _clean(repo, checkout)
817 else:
814 else:
818 if updatecheck == 'abort':
815 if updatecheck == 'abort':
819 cmdutil.bailifchanged(repo, merge=False)
816 cmdutil.bailifchanged(repo, merge=False)
820 updatecheck = 'none'
817 updatecheck = 'none'
821 ret = _update(repo, checkout, updatecheck=updatecheck)
818 ret = _update(repo, checkout, updatecheck=updatecheck)
822
819
823 if not ret and movemarkfrom:
820 if not ret and movemarkfrom:
824 if movemarkfrom == repo['.'].node():
821 if movemarkfrom == repo['.'].node():
825 pass # no-op update
822 pass # no-op update
826 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
823 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
827 b = ui.label(repo._activebookmark, 'bookmarks.active')
824 b = ui.label(repo._activebookmark, 'bookmarks.active')
828 ui.status(_("updating bookmark %s\n") % b)
825 ui.status(_("updating bookmark %s\n") % b)
829 else:
826 else:
830 # this can happen with a non-linear update
827 # this can happen with a non-linear update
831 b = ui.label(repo._activebookmark, 'bookmarks')
828 b = ui.label(repo._activebookmark, 'bookmarks')
832 ui.status(_("(leaving bookmark %s)\n") % b)
829 ui.status(_("(leaving bookmark %s)\n") % b)
833 bookmarks.deactivate(repo)
830 bookmarks.deactivate(repo)
834 elif brev in repo._bookmarks:
831 elif brev in repo._bookmarks:
835 if brev != repo._activebookmark:
832 if brev != repo._activebookmark:
836 b = ui.label(brev, 'bookmarks.active')
833 b = ui.label(brev, 'bookmarks.active')
837 ui.status(_("(activating bookmark %s)\n") % b)
834 ui.status(_("(activating bookmark %s)\n") % b)
838 bookmarks.activate(repo, brev)
835 bookmarks.activate(repo, brev)
839 elif brev:
836 elif brev:
840 if repo._activebookmark:
837 if repo._activebookmark:
841 b = ui.label(repo._activebookmark, 'bookmarks')
838 b = ui.label(repo._activebookmark, 'bookmarks')
842 ui.status(_("(leaving bookmark %s)\n") % b)
839 ui.status(_("(leaving bookmark %s)\n") % b)
843 bookmarks.deactivate(repo)
840 bookmarks.deactivate(repo)
844
841
845 if warndest:
842 if warndest:
846 destutil.statusotherdests(ui, repo)
843 destutil.statusotherdests(ui, repo)
847
844
848 return ret
845 return ret
849
846
850 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
847 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
851 """Branch merge with node, resolving changes. Return true if any
848 """Branch merge with node, resolving changes. Return true if any
852 unresolved conflicts."""
849 unresolved conflicts."""
853 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
850 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
854 labels=labels)
851 labels=labels)
855 _showstats(repo, stats)
852 _showstats(repo, stats)
856 if stats[3]:
853 if stats[3]:
857 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
854 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
858 "or 'hg update -C .' to abandon\n"))
855 "or 'hg update -C .' to abandon\n"))
859 elif remind:
856 elif remind:
860 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
857 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
861 return stats[3] > 0
858 return stats[3] > 0
862
859
863 def _incoming(displaychlist, subreporecurse, ui, repo, source,
860 def _incoming(displaychlist, subreporecurse, ui, repo, source,
864 opts, buffered=False):
861 opts, buffered=False):
865 """
862 """
866 Helper for incoming / gincoming.
863 Helper for incoming / gincoming.
867 displaychlist gets called with
864 displaychlist gets called with
868 (remoterepo, incomingchangesetlist, displayer) parameters,
865 (remoterepo, incomingchangesetlist, displayer) parameters,
869 and is supposed to contain only code that can't be unified.
866 and is supposed to contain only code that can't be unified.
870 """
867 """
871 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
868 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
872 other = peer(repo, opts, source)
869 other = peer(repo, opts, source)
873 ui.status(_('comparing with %s\n') % util.hidepassword(source))
870 ui.status(_('comparing with %s\n') % util.hidepassword(source))
874 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
871 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
875
872
876 if revs:
873 if revs:
877 revs = [other.lookup(rev) for rev in revs]
874 revs = [other.lookup(rev) for rev in revs]
878 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
875 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
879 revs, opts["bundle"], opts["force"])
876 revs, opts["bundle"], opts["force"])
880 try:
877 try:
881 if not chlist:
878 if not chlist:
882 ui.status(_("no changes found\n"))
879 ui.status(_("no changes found\n"))
883 return subreporecurse()
880 return subreporecurse()
884 ui.pager('incoming')
881 ui.pager('incoming')
885 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
882 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
886 displaychlist(other, chlist, displayer)
883 displaychlist(other, chlist, displayer)
887 displayer.close()
884 displayer.close()
888 finally:
885 finally:
889 cleanupfn()
886 cleanupfn()
890 subreporecurse()
887 subreporecurse()
891 return 0 # exit code is zero since we found incoming changes
888 return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other
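
# Editorial note (hedged): _outgoing() returns the changelog nodes that exist
# locally but are missing from the destination (outgoing.missing) together
# with the peer object, so the caller can both display those changesets and
# run the outgoing hooks against the same peer.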

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
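
# Editorial note (hedged): both incoming() and outgoing() follow the usual hg
# exit-code convention of returning 0 when changesets were found and 1 when
# there was nothing to report; the nested recurse()/subreporecurse() helpers
# start from 1 and only lower it if a subrepository finds something.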

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst
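
# Editorial sketch (hedged, not part of the original module): remoteui() builds
# the ui handed to ssh/http peers, and only the whitelisted settings above
# travel with it.  A hypothetical call from an extension:
#
#     rui = remoteui(repo, {'ssh': 'ssh -C'})
#     rui.config('ui', 'ssh')   # -> 'ssh -C', copied from opts
#     # repo-local configuration is dropped because dst starts from repo.baseui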

# Files of interest
# Used to check if the repository has changed, by looking at the mtime and
# size of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]
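
# Editorial note (hedged): the first element of each pair names a repository
# attribute -- 'path' is the .hg directory and 'spath' the store directory
# (.hg/store) -- and the second is a file inside it, so _repostate() below can
# stat e.g. .hg/store/00changelog.i without hard-coding the layout.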

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime
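
    # Editorial note (hedged): _repostate() returns one (mtime, size) pair per
    # entry in foi, falling back to the containing directory when a file such
    # as obsstore does not exist yet; fetch() treats any difference in this
    # fingerprint as "the repository may have changed" and rebuilds the repo.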

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
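
# Editorial sketch (hedged, not part of the original module): how a
# long-running process such as hgweb might use cachedlocalrepo.  The
# repository path, requests iterable, and handle() function are hypothetical.
#
#     from mercurial import hg, ui as uimod
#
#     cached = hg.cachedlocalrepo(hg.repository(uimod.ui.load(), '/srv/repo'))
#     for request in requests:
#         repo, fresh = cached.fetch()  # reuses the instance if nothing changed
#         if fresh:
#             pass  # caches keyed on the old repo instance are now stale
#         handle(request, repo)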