py3: check for bytes instead of str in hg.share()...
Gregory Szorc
r36066:488e3139 default
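This is a one-line Python 3 portability fix in mercurial/hg.py: share() uses the isinstance() check to decide whether `source` is a path or an already-open repository object, and Mercurial passes paths around as bytes, so the test must look for `bytes` rather than `str` to keep behaving the same under Python 3. A minimal sketch of the difference follows (illustrative only, not part of the commit):

```python
# Illustrative sketch of why the isinstance() check changes (not from the commit).
# Mercurial represents repository paths as bytes.
source = b'/path/to/repo'

# Old check: on Python 2, str *is* the bytes type, so this was True.
# On Python 3, str means unicode text, so the same check is False and
# share() would wrongly fall through to the "repository object" branch.
print(isinstance(source, str))    # True on Python 2, False on Python 3

# New check: matches the bytes path on both major versions,
# because Python 2's str is bytes.
print(isinstance(source, bytes))  # True on Python 2 and Python 3
```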
@@ -1,1117 +1,1117 @@
1 # hg.py - repository classes for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 from __future__ import absolute_import
10
11 import errno
12 import hashlib
13 import os
14 import shutil
15
16 from .i18n import _
17 from .node import (
18 nullid,
19 )
20
21 from . import (
22 bookmarks,
23 bundlerepo,
24 cacheutil,
25 cmdutil,
26 destutil,
27 discovery,
28 error,
29 exchange,
30 extensions,
31 httppeer,
32 localrepo,
33 lock,
34 logcmdutil,
35 logexchange,
36 merge as mergemod,
37 node,
38 phases,
39 scmutil,
40 sshpeer,
41 statichttprepo,
42 ui as uimod,
43 unionrepo,
44 url,
45 util,
46 verify as verifymod,
47 vfs as vfsmod,
48 )
49
50 release = lock.release
51
52 # shared features
53 sharedbookmarks = 'bookmarks'
54
55 def _local(path):
56 path = util.expandpath(util.urllocalpath(path))
57 return (os.path.isfile(path) and bundlerepo or localrepo)
58
59 def addbranchrevs(lrepo, other, branches, revs):
60 peer = other.peer() # a courtesy to callers using a localrepo for other
61 hashbranch, branches = branches
62 if not hashbranch and not branches:
63 x = revs or None
64 if util.safehasattr(revs, 'first'):
65 y = revs.first()
66 elif revs:
67 y = revs[0]
68 else:
69 y = None
70 return x, y
71 if revs:
72 revs = list(revs)
73 else:
74 revs = []
75
76 if not peer.capable('branchmap'):
77 if branches:
78 raise error.Abort(_("remote branch lookup not supported"))
79 revs.append(hashbranch)
80 return revs, revs[0]
81 branchmap = peer.branchmap()
82
83 def primary(branch):
84 if branch == '.':
85 if not lrepo:
86 raise error.Abort(_("dirstate branch not accessible"))
87 branch = lrepo.dirstate.branch()
88 if branch in branchmap:
89 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
90 return True
91 else:
92 return False
93
94 for branch in branches:
95 if not primary(branch):
96 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
97 if hashbranch:
98 if not primary(hashbranch):
99 revs.append(hashbranch)
100 return revs, revs[0]
101
102 def parseurl(path, branches=None):
103 '''parse url#branch, returning (url, (branch, branches))'''
104
105 u = util.url(path)
106 branch = None
107 if u.fragment:
108 branch = u.fragment
109 u.fragment = None
110 return bytes(u), (branch, branches or [])
111
112 schemes = {
113 'bundle': bundlerepo,
114 'union': unionrepo,
115 'file': _local,
116 'http': httppeer,
117 'https': httppeer,
118 'ssh': sshpeer,
119 'static-http': statichttprepo,
120 }
121
122 def _peerlookup(path):
123 u = util.url(path)
124 scheme = u.scheme or 'file'
125 thing = schemes.get(scheme) or schemes['file']
126 try:
127 return thing(path)
128 except TypeError:
129 # we can't test callable(thing) because 'thing' can be an unloaded
130 # module that implements __call__
131 if not util.safehasattr(thing, 'instance'):
132 raise
133 return thing
134
135 def islocal(repo):
136 '''return true if repo (or path pointing to repo) is local'''
137 if isinstance(repo, bytes):
138 try:
139 return _peerlookup(repo).islocal(repo)
140 except AttributeError:
141 return False
142 return repo.local()
143
144 def openpath(ui, path):
145 '''open path with open if local, url.open if remote'''
146 pathurl = util.url(path, parsequery=False, parsefragment=False)
147 if pathurl.islocal():
148 return util.posixfile(pathurl.localpath(), 'rb')
149 else:
150 return url.open(ui, path)
151
152 # a list of (ui, repo) functions called for wire peer initialization
153 wirepeersetupfuncs = []
154
155 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
156 """return a repository object for the specified path"""
157 obj = _peerlookup(path).instance(ui, path, create)
158 ui = getattr(obj, "ui", ui)
159 for f in presetupfuncs or []:
160 f(ui, obj)
161 for name, module in extensions.extensions(ui):
162 hook = getattr(module, 'reposetup', None)
163 if hook:
164 hook(ui, obj)
165 if not obj.local():
166 for f in wirepeersetupfuncs:
167 f(ui, obj)
168 return obj
169
170 def repository(ui, path='', create=False, presetupfuncs=None):
171 """return a repository object for the specified path"""
172 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
173 repo = peer.local()
174 if not repo:
175 raise error.Abort(_("repository '%s' is not local") %
176 (path or peer.url()))
177 return repo.filtered('visible')
178
179 def peer(uiorrepo, opts, path, create=False):
180 '''return a repository peer for the specified path'''
181 rui = remoteui(uiorrepo, opts)
182 return _peerorrepo(rui, path, create).peer()
183
184 def defaultdest(source):
185 '''return default destination of clone if none is given
186
187 >>> defaultdest(b'foo')
188 'foo'
189 >>> defaultdest(b'/foo/bar')
190 'bar'
191 >>> defaultdest(b'/')
192 ''
193 >>> defaultdest(b'')
194 ''
195 >>> defaultdest(b'http://example.org/')
196 ''
197 >>> defaultdest(b'http://example.org/foo/')
198 'foo'
199 '''
200 path = util.url(source).path
201 if not path:
202 return ''
203 return os.path.basename(os.path.normpath(path))
204
205 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
206 relative=False):
207 '''create a shared repository'''
208
209 if not islocal(source):
210 raise error.Abort(_('can only share local repositories'))
211
212 if not dest:
213 dest = defaultdest(source)
214 else:
215 dest = ui.expandpath(dest)
216
- 217 if isinstance(source, str):
+ 217 if isinstance(source, bytes):
218 origsource = ui.expandpath(source)
219 source, branches = parseurl(origsource)
220 srcrepo = repository(ui, source)
221 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
222 else:
223 srcrepo = source.local()
224 origsource = source = srcrepo.url()
225 checkout = None
226
227 sharedpath = srcrepo.sharedpath # if our source is already sharing
228
229 destwvfs = vfsmod.vfs(dest, realpath=True)
230 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
231
232 if destvfs.lexists():
233 raise error.Abort(_('destination already exists'))
234
235 if not destwvfs.isdir():
236 destwvfs.mkdir()
237 destvfs.makedir()
238
239 requirements = ''
240 try:
241 requirements = srcrepo.vfs.read('requires')
242 except IOError as inst:
243 if inst.errno != errno.ENOENT:
244 raise
245
246 if relative:
247 try:
248 sharedpath = os.path.relpath(sharedpath, destvfs.base)
249 requirements += 'relshared\n'
250 except (IOError, ValueError) as e:
251 # ValueError is raised on Windows if the drive letters differ on
252 # each path
253 raise error.Abort(_('cannot calculate relative path'),
254 hint=str(e))
255 else:
256 requirements += 'shared\n'
257
258 destvfs.write('requires', requirements)
259 destvfs.write('sharedpath', sharedpath)
260
261 r = repository(ui, destwvfs.base)
262 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
263 _postshareupdate(r, update, checkout=checkout)
264 return r
265
266 def unshare(ui, repo):
267 """convert a shared repository to a normal one
268
269 Copy the store data to the repo and remove the sharedpath data.
270 """
271
272 destlock = lock = None
273 lock = repo.lock()
274 try:
275 # we use locks here because if we race with commit, we
276 # can end up with extra data in the cloned revlogs that's
277 # not pointed to by changesets, thus causing verify to
278 # fail
279
280 destlock = copystore(ui, repo, repo.path)
281
282 sharefile = repo.vfs.join('sharedpath')
283 util.rename(sharefile, sharefile + '.old')
284
285 repo.requirements.discard('shared')
286 repo.requirements.discard('relshared')
287 repo._writerequirements()
288 finally:
289 destlock and destlock.release()
290 lock and lock.release()
291
292 # update store, spath, svfs and sjoin of repo
293 repo.unfiltered().__init__(repo.baseui, repo.root)
294
295 # TODO: figure out how to access subrepos that exist, but were previously
296 # removed from .hgsub
297 c = repo['.']
298 subs = c.substate
299 for s in sorted(subs):
300 c.sub(s).unshare()
301
302 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
303 """Called after a new shared repo is created.
304
305 The new repo only has a requirements file and pointer to the source.
306 This function configures additional shared data.
307
308 Extensions can wrap this function and write additional entries to
309 destrepo/.hg/shared to indicate additional pieces of data to be shared.
310 """
311 default = defaultpath or sourcerepo.ui.config('paths', 'default')
312 if default:
313 template = ('[paths]\n'
314 'default = %s\n')
315 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
316
317 with destrepo.wlock():
318 if bookmarks:
319 destrepo.vfs.write('shared', sharedbookmarks + '\n')
320
321 def _postshareupdate(repo, update, checkout=None):
322 """Maybe perform a working directory update after a shared repo is created.
323
324 ``update`` can be a boolean or a revision to update to.
325 """
326 if not update:
327 return
328
329 repo.ui.status(_("updating working directory\n"))
330 if update is not True:
331 checkout = update
332 for test in (checkout, 'default', 'tip'):
333 if test is None:
334 continue
335 try:
336 uprev = repo.lookup(test)
337 break
338 except error.RepoLookupError:
339 continue
340 _update(repo, uprev)
341
342 def copystore(ui, srcrepo, destpath):
343 '''copy files from store of srcrepo in destpath
344
345 returns destlock
346 '''
347 destlock = None
348 try:
349 hardlink = None
350 num = 0
351 closetopic = [None]
352 def prog(topic, pos):
353 if pos is None:
354 closetopic[0] = topic
355 else:
356 ui.progress(topic, pos + num)
357 srcpublishing = srcrepo.publishing()
358 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
359 dstvfs = vfsmod.vfs(destpath)
360 for f in srcrepo.store.copylist():
361 if srcpublishing and f.endswith('phaseroots'):
362 continue
363 dstbase = os.path.dirname(f)
364 if dstbase and not dstvfs.exists(dstbase):
365 dstvfs.mkdir(dstbase)
366 if srcvfs.exists(f):
367 if f.endswith('data'):
368 # 'dstbase' may be empty (e.g. revlog format 0)
369 lockfile = os.path.join(dstbase, "lock")
370 # lock to avoid premature writing to the target
371 destlock = lock.lock(dstvfs, lockfile)
372 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
373 hardlink, progress=prog)
374 num += n
375 if hardlink:
376 ui.debug("linked %d files\n" % num)
377 if closetopic[0]:
378 ui.progress(closetopic[0], None)
379 else:
380 ui.debug("copied %d files\n" % num)
381 if closetopic[0]:
382 ui.progress(closetopic[0], None)
383 return destlock
384 except: # re-raises
385 release(destlock)
386 raise
387
388 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
389 rev=None, update=True, stream=False):
390 """Perform a clone using a shared repo.
391
392 The store for the repository will be located at <sharepath>/.hg. The
393 specified revisions will be cloned or pulled from "source". A shared repo
394 will be created at "dest" and a working copy will be created if "update" is
395 True.
396 """
397 revs = None
398 if rev:
399 if not srcpeer.capable('lookup'):
400 raise error.Abort(_("src repository does not support "
401 "revision lookup and so doesn't "
402 "support clone by revision"))
403 revs = [srcpeer.lookup(r) for r in rev]
404
405 # Obtain a lock before checking for or cloning the pooled repo otherwise
406 # 2 clients may race creating or populating it.
407 pooldir = os.path.dirname(sharepath)
408 # lock class requires the directory to exist.
409 try:
410 util.makedir(pooldir, False)
411 except OSError as e:
412 if e.errno != errno.EEXIST:
413 raise
414
415 poolvfs = vfsmod.vfs(pooldir)
416 basename = os.path.basename(sharepath)
417
418 with lock.lock(poolvfs, '%s.lock' % basename):
419 if os.path.exists(sharepath):
420 ui.status(_('(sharing from existing pooled repository %s)\n') %
421 basename)
422 else:
423 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
424 # Always use pull mode because hardlinks in share mode don't work
425 # well. Never update because working copies aren't necessary in
426 # share mode.
427 clone(ui, peeropts, source, dest=sharepath, pull=True,
428 rev=rev, update=False, stream=stream)
429
430 # Resolve the value to put in [paths] section for the source.
431 if islocal(source):
432 defaultpath = os.path.abspath(util.urllocalpath(source))
433 else:
434 defaultpath = source
435
436 sharerepo = repository(ui, path=sharepath)
437 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
438 defaultpath=defaultpath)
439
440 # We need to perform a pull against the dest repo to fetch bookmarks
441 # and other non-store data that isn't shared by default. In the case of
442 # non-existing shared repo, this means we pull from the remote twice. This
443 # is a bit weird. But at the time it was implemented, there wasn't an easy
444 # way to pull just non-changegroup data.
445 destrepo = repository(ui, path=dest)
446 exchange.pull(destrepo, srcpeer, heads=revs)
447
448 _postshareupdate(destrepo, update)
449
450 return srcpeer, peer(ui, peeropts, dest)
451
452 # Recomputing branch cache might be slow on big repos,
453 # so just copy it
454 def _copycache(srcrepo, dstcachedir, fname):
455 """copy a cache from srcrepo to destcachedir (if it exists)"""
456 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
457 dstbranchcache = os.path.join(dstcachedir, fname)
458 if os.path.exists(srcbranchcache):
459 if not os.path.exists(dstcachedir):
460 os.mkdir(dstcachedir)
461 util.copyfile(srcbranchcache, dstbranchcache)
462
463 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
464 update=True, stream=False, branch=None, shareopts=None):
465 """Make a copy of an existing repository.
466
467 Create a copy of an existing repository in a new directory. The
468 source and destination are URLs, as passed to the repository
469 function. Returns a pair of repository peers, the source and
470 newly created destination.
471
472 The location of the source is added to the new repository's
473 .hg/hgrc file, as the default to be used for future pulls and
474 pushes.
475
476 If an exception is raised, the partly cloned/updated destination
477 repository will be deleted.
478
479 Arguments:
480
481 source: repository object or URL
482
483 dest: URL of destination repository to create (defaults to base
484 name of source repository)
485
486 pull: always pull from source repository, even in local case or if the
487 server prefers streaming
488
489 stream: stream raw data uncompressed from repository (fast over
490 LAN, slow over WAN)
491
492 rev: revision to clone up to (implies pull=True)
493
494 update: update working directory after clone completes, if
495 destination is local repository (True means update to default rev,
496 anything else is treated as a revision)
497
498 branch: branches to clone
499
500 shareopts: dict of options to control auto sharing behavior. The "pool" key
501 activates auto sharing mode and defines the directory for stores. The
502 "mode" key determines how to construct the directory name of the shared
503 repository. "identity" means the name is derived from the node of the first
504 changeset in the repository. "remote" means the name is derived from the
505 remote's path/URL. Defaults to "identity."
506 """
507
508 if isinstance(source, bytes):
509 origsource = ui.expandpath(source)
510 source, branch = parseurl(origsource, branch)
511 srcpeer = peer(ui, peeropts, source)
512 else:
513 srcpeer = source.peer() # in case we were called with a localrepo
514 branch = (None, branch or [])
515 origsource = source = srcpeer.url()
516 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
517
518 if dest is None:
519 dest = defaultdest(source)
520 if dest:
521 ui.status(_("destination directory: %s\n") % dest)
522 else:
523 dest = ui.expandpath(dest)
524
525 dest = util.urllocalpath(dest)
526 source = util.urllocalpath(source)
527
528 if not dest:
529 raise error.Abort(_("empty destination path is not valid"))
530
531 destvfs = vfsmod.vfs(dest, expandpath=True)
532 if destvfs.lexists():
533 if not destvfs.isdir():
534 raise error.Abort(_("destination '%s' already exists") % dest)
535 elif destvfs.listdir():
536 raise error.Abort(_("destination '%s' is not empty") % dest)
537
538 shareopts = shareopts or {}
539 sharepool = shareopts.get('pool')
540 sharenamemode = shareopts.get('mode')
541 if sharepool and islocal(dest):
542 sharepath = None
543 if sharenamemode == 'identity':
544 # Resolve the name from the initial changeset in the remote
545 # repository. This returns nullid when the remote is empty. It
546 # raises RepoLookupError if revision 0 is filtered or otherwise
547 # not available. If we fail to resolve, sharing is not enabled.
548 try:
549 rootnode = srcpeer.lookup('0')
550 if rootnode != node.nullid:
551 sharepath = os.path.join(sharepool, node.hex(rootnode))
552 else:
553 ui.status(_('(not using pooled storage: '
554 'remote appears to be empty)\n'))
555 except error.RepoLookupError:
556 ui.status(_('(not using pooled storage: '
557 'unable to resolve identity of remote)\n'))
558 elif sharenamemode == 'remote':
559 sharepath = os.path.join(
560 sharepool, node.hex(hashlib.sha1(source).digest()))
561 else:
562 raise error.Abort(_('unknown share naming mode: %s') %
563 sharenamemode)
564
565 if sharepath:
566 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
567 dest, pull=pull, rev=rev, update=update,
568 stream=stream)
569
570 srclock = destlock = cleandir = None
571 srcrepo = srcpeer.local()
572 try:
573 abspath = origsource
574 if islocal(origsource):
575 abspath = os.path.abspath(util.urllocalpath(origsource))
576
577 if islocal(dest):
578 cleandir = dest
579
580 copy = False
581 if (srcrepo and srcrepo.cancopy() and islocal(dest)
582 and not phases.hassecret(srcrepo)):
583 copy = not pull and not rev
584
585 if copy:
586 try:
587 # we use a lock here because if we race with commit, we
588 # can end up with extra data in the cloned revlogs that's
589 # not pointed to by changesets, thus causing verify to
590 # fail
591 srclock = srcrepo.lock(wait=False)
592 except error.LockError:
593 copy = False
594
595 if copy:
596 srcrepo.hook('preoutgoing', throw=True, source='clone')
597 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
598 if not os.path.exists(dest):
599 os.mkdir(dest)
600 else:
601 # only clean up directories we create ourselves
602 cleandir = hgdir
603 try:
604 destpath = hgdir
605 util.makedir(destpath, notindexed=True)
606 except OSError as inst:
607 if inst.errno == errno.EEXIST:
608 cleandir = None
609 raise error.Abort(_("destination '%s' already exists")
610 % dest)
611 raise
612
613 destlock = copystore(ui, srcrepo, destpath)
614 # copy bookmarks over
615 srcbookmarks = srcrepo.vfs.join('bookmarks')
616 dstbookmarks = os.path.join(destpath, 'bookmarks')
617 if os.path.exists(srcbookmarks):
618 util.copyfile(srcbookmarks, dstbookmarks)
619
620 dstcachedir = os.path.join(destpath, 'cache')
621 for cache in cacheutil.cachetocopy(srcrepo):
622 _copycache(srcrepo, dstcachedir, cache)
623
624 # we need to re-init the repo after manually copying the data
625 # into it
626 destpeer = peer(srcrepo, peeropts, dest)
627 srcrepo.hook('outgoing', source='clone',
628 node=node.hex(node.nullid))
629 else:
630 try:
631 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
632 # only pass ui when no srcrepo
633 except OSError as inst:
634 if inst.errno == errno.EEXIST:
635 cleandir = None
636 raise error.Abort(_("destination '%s' already exists")
637 % dest)
638 raise
639
640 revs = None
641 if rev:
642 if not srcpeer.capable('lookup'):
643 raise error.Abort(_("src repository does not support "
644 "revision lookup and so doesn't "
645 "support clone by revision"))
646 revs = [srcpeer.lookup(r) for r in rev]
647 checkout = revs[0]
648 local = destpeer.local()
649 if local:
650 u = util.url(abspath)
651 defaulturl = bytes(u)
652 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
653 if not stream:
654 if pull:
655 stream = False
656 else:
657 stream = None
658 # internal config: ui.quietbookmarkmove
659 overrides = {('ui', 'quietbookmarkmove'): True}
660 with local.ui.configoverride(overrides, 'clone'):
661 exchange.pull(local, srcpeer, revs,
662 streamclonerequested=stream)
663 elif srcrepo:
664 exchange.push(srcrepo, destpeer, revs=revs,
665 bookmarks=srcrepo._bookmarks.keys())
666 else:
667 raise error.Abort(_("clone from remote to remote not supported")
668 )
669
670 cleandir = None
671
672 destrepo = destpeer.local()
673 if destrepo:
674 template = uimod.samplehgrcs['cloned']
675 u = util.url(abspath)
676 u.passwd = None
677 defaulturl = bytes(u)
678 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
679 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
680
681 if ui.configbool('experimental', 'remotenames'):
682 logexchange.pullremotenames(destrepo, srcpeer)
683
684 if update:
685 if update is not True:
686 checkout = srcpeer.lookup(update)
687 uprev = None
688 status = None
689 if checkout is not None:
690 try:
691 uprev = destrepo.lookup(checkout)
692 except error.RepoLookupError:
693 if update is not True:
694 try:
695 uprev = destrepo.lookup(update)
696 except error.RepoLookupError:
697 pass
698 if uprev is None:
699 try:
700 uprev = destrepo._bookmarks['@']
701 update = '@'
702 bn = destrepo[uprev].branch()
703 if bn == 'default':
704 status = _("updating to bookmark @\n")
705 else:
706 status = (_("updating to bookmark @ on branch %s\n")
707 % bn)
708 except KeyError:
709 try:
710 uprev = destrepo.branchtip('default')
711 except error.RepoLookupError:
712 uprev = destrepo.lookup('tip')
713 if not status:
714 bn = destrepo[uprev].branch()
715 status = _("updating to branch %s\n") % bn
716 destrepo.ui.status(status)
717 _update(destrepo, uprev)
718 if update in destrepo._bookmarks:
719 bookmarks.activate(destrepo, update)
720 finally:
721 release(srclock, destlock)
722 if cleandir is not None:
723 shutil.rmtree(cleandir, True)
724 if srcpeer is not None:
725 srcpeer.close()
726 return srcpeer, destpeer
727
728 def _showstats(repo, stats, quietempty=False):
729 if quietempty and not any(stats):
730 return
731 repo.ui.status(_("%d files updated, %d files merged, "
732 "%d files removed, %d files unresolved\n") % stats)
733
734 def updaterepo(repo, node, overwrite, updatecheck=None):
735 """Update the working directory to node.
736
737 When overwrite is set, changes are clobbered, merged else
738
739 returns stats (see pydoc mercurial.merge.applyupdates)"""
740 return mergemod.update(repo, node, False, overwrite,
741 labels=['working copy', 'destination'],
742 updatecheck=updatecheck)
743
744 def update(repo, node, quietempty=False, updatecheck=None):
745 """update the working directory to node"""
746 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
747 _showstats(repo, stats, quietempty)
748 if stats[3]:
749 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
750 return stats[3] > 0
751
752 # naming conflict in clone()
753 _update = update
754
755 def clean(repo, node, show_stats=True, quietempty=False):
756 """forcibly switch the working directory to node, clobbering changes"""
757 stats = updaterepo(repo, node, True)
758 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
759 if show_stats:
760 _showstats(repo, stats, quietempty)
761 return stats[3] > 0
762
763 # naming conflict in updatetotally()
764 _clean = clean
765
766 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
767 """Update the working directory with extra care for non-file components
768
769 This takes care of non-file components below:
770
771 :bookmark: might be advanced or (in)activated
772
773 This takes arguments below:
774
775 :checkout: to which revision the working directory is updated
776 :brev: a name, which might be a bookmark to be activated after updating
777 :clean: whether changes in the working directory can be discarded
778 :updatecheck: how to deal with a dirty working directory
779
780 Valid values for updatecheck are (None => linear):
781
782 * abort: abort if the working directory is dirty
783 * none: don't check (merge working directory changes into destination)
784 * linear: check that update is linear before merging working directory
785 changes into destination
786 * noconflict: check that the update does not result in file merges
787
788 This returns whether conflict is detected at updating or not.
789 """
790 if updatecheck is None:
791 updatecheck = ui.config('commands', 'update.check')
792 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
793 # If not configured, or invalid value configured
794 updatecheck = 'linear'
795 with repo.wlock():
796 movemarkfrom = None
797 warndest = False
798 if checkout is None:
799 updata = destutil.destupdate(repo, clean=clean)
800 checkout, movemarkfrom, brev = updata
801 warndest = True
802
803 if clean:
804 ret = _clean(repo, checkout)
805 else:
806 if updatecheck == 'abort':
807 cmdutil.bailifchanged(repo, merge=False)
808 updatecheck = 'none'
809 ret = _update(repo, checkout, updatecheck=updatecheck)
810
811 if not ret and movemarkfrom:
812 if movemarkfrom == repo['.'].node():
813 pass # no-op update
814 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
815 b = ui.label(repo._activebookmark, 'bookmarks.active')
816 ui.status(_("updating bookmark %s\n") % b)
817 else:
818 # this can happen with a non-linear update
819 b = ui.label(repo._activebookmark, 'bookmarks')
820 ui.status(_("(leaving bookmark %s)\n") % b)
821 bookmarks.deactivate(repo)
822 elif brev in repo._bookmarks:
823 if brev != repo._activebookmark:
824 b = ui.label(brev, 'bookmarks.active')
825 ui.status(_("(activating bookmark %s)\n") % b)
826 bookmarks.activate(repo, brev)
827 elif brev:
828 if repo._activebookmark:
829 b = ui.label(repo._activebookmark, 'bookmarks')
830 ui.status(_("(leaving bookmark %s)\n") % b)
831 bookmarks.deactivate(repo)
832
833 if warndest:
834 destutil.statusotherdests(ui, repo)
835
836 return ret
837
838 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
839 abort=False):
840 """Branch merge with node, resolving changes. Return true if any
841 unresolved conflicts."""
842 if not abort:
843 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
844 labels=labels)
845 else:
846 ms = mergemod.mergestate.read(repo)
847 if ms.active():
848 # there were conflicts
849 node = ms.localctx.hex()
850 else:
851 # there were no conficts, mergestate was not stored
852 node = repo['.'].hex()
853
854 repo.ui.status(_("aborting the merge, updating back to"
855 " %s\n") % node[:12])
856 stats = mergemod.update(repo, node, branchmerge=False, force=True,
857 labels=labels)
858
859 _showstats(repo, stats)
860 if stats[3]:
861 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
862 "or 'hg merge --abort' to abandon\n"))
863 elif remind and not abort:
864 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
865 return stats[3] > 0
866
867 def _incoming(displaychlist, subreporecurse, ui, repo, source,
868 opts, buffered=False):
869 """
870 Helper for incoming / gincoming.
871 displaychlist gets called with
872 (remoterepo, incomingchangesetlist, displayer) parameters,
873 and is supposed to contain only code that can't be unified.
874 """
875 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
876 other = peer(repo, opts, source)
877 ui.status(_('comparing with %s\n') % util.hidepassword(source))
878 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
878 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
879
879
880 if revs:
880 if revs:
881 revs = [other.lookup(rev) for rev in revs]
881 revs = [other.lookup(rev) for rev in revs]
882 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
882 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
883 revs, opts["bundle"], opts["force"])
883 revs, opts["bundle"], opts["force"])
884 try:
884 try:
885 if not chlist:
885 if not chlist:
886 ui.status(_("no changes found\n"))
886 ui.status(_("no changes found\n"))
887 return subreporecurse()
887 return subreporecurse()
888 ui.pager('incoming')
888 ui.pager('incoming')
889 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
889 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
890 buffered=buffered)
890 buffered=buffered)
891 displaychlist(other, chlist, displayer)
891 displaychlist(other, chlist, displayer)
892 displayer.close()
892 displayer.close()
893 finally:
893 finally:
894 cleanupfn()
894 cleanupfn()
895 subreporecurse()
895 subreporecurse()
896 return 0 # exit code is zero since we found incoming changes
896 return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
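# Exit-code behaviour as implemented above: _incoming() returns 0 when
# incoming changesets are found; otherwise subreporecurse() is called, which
# starts from 1 and takes the minimum over the per-subrepo results, so the
# command exits 0 only if this repository or some subrepo has incoming
# changes.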

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other
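# _outgoing() returns a pair of (missing, other): the changeset nodes not
# known to the destination and the peer object for that destination. A hedged
# usage sketch (names are illustrative only; outgoing() below is the real
# caller):
#
#     nodes, other = _outgoing(ui, repo, dest, opts)
#     if nodes:
#         ...  # something to display or push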

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
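# The value returned by verify() feeds the command's exit code:
# verifymod.verify() and the per-subrepo verify() calls are assumed to return
# a falsy value on success and a truthy/non-zero value when problems were
# found, which is why their results are combined with 'or' above.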

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst
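# remoteui() is typically handed either a repo or a bare ui together with the
# command options; a hedged usage sketch (illustrative only, the peer
# construction code elsewhere in this module is the real consumer):
#
#     rui = remoteui(repo, opts)
#     # rui now carries ui.ssh/ui.remotecmd plus auth/proxy sections, but no
#     # repo-specific configuration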

# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]
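# Each (attr, fname) pair in foi is resolved by cachedlocalrepo._repostate()
# below as os.path.join(getattr(repo, attr), fname); 'spath' entries are
# expected to live in the store directory and 'path' entries directly under
# .hg, so the list covers the changelog, phases, obsolescence markers and
# bookmarks.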

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
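# A hedged usage sketch for cachedlocalrepo (illustrative only; long-lived
# hgweb-style processes are the assumed consumer of this cache):
#
#     cached = cachedlocalrepo(repo)
#     ...
#     repo, created = cached.fetch()  # reuses repo unless the foi files moved
#     if created:
#         ...  # re-derive anything tied to the old repo instance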