share: use context manager or utility function to write file
Yuya Nishihara
r35637:c751b9fd default
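The diff below converts two hand-rolled open/write/close sequences in postshare() into a "with" block and a single vfs.write() call, so the files are closed even when a write raises. A minimal, generic sketch of the same pattern follows; it uses plain open() and a hypothetical write_file() helper standing in for Mercurial's vfs API, and is an illustration only, not the actual implementation:

    import os
    import tempfile

    def write_file(path, data):
        # One-shot helper in the spirit of vfs.write(): open, write, and
        # close in a single call so callers cannot leak the file handle.
        with open(path, 'w') as fp:
            fp.write(data)

    d = tempfile.mkdtemp()

    # Before: the explicit close() is skipped if a write() raises.
    fp = open(os.path.join(d, 'hgrc'), 'w')
    fp.write("[paths]\n")
    fp.write("default = %s\n" % "/path/to/source")
    fp.close()

    # After: the context manager closes the file on success and on error.
    with open(os.path.join(d, 'hgrc'), 'w') as fp:
        fp.write("[paths]\n")
        fp.write("default = %s\n" % "/path/to/source")

    # After: a trivial one-line write collapses into the helper.
    write_file(os.path.join(d, 'shared'), 'bookmarks\n')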
@@ -1,1115 +1,1112 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil

from .i18n import _
from .node import nullid

from . import (
    bookmarks,
    bundlerepo,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logexchange,
    merge as mergemod,
    node,
    phases,
    repoview,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=str(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
-        fp = destrepo.vfs("hgrc", "w", text=True)
-        fp.write("[paths]\n")
-        fp.write("default = %s\n" % default)
-        fp.close()
+        with destrepo.vfs("hgrc", "w", text=True) as fp:
+            fp.write("[paths]\n")
+            fp.write("default = %s\n" % default)

    with destrepo.wlock():
        if bookmarks:
-            fp = destrepo.vfs('shared', 'w')
-            fp.write(sharedbookmarks + '\n')
-            fp.close()
+            destrepo.vfs.write('shared', sharedbookmarks + '\n')

def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        closetopic = [None]
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
        dstvfs = vfsmod.vfs(destpath)
        for f in srcrepo.store.copylist():
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  rev=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def _cachetocopy(srcrepo):
    """return the list of cache file valuable to copy during a clone"""
    # In local clones we're copying all nodes, not just served
    # ones. Therefore copy all branch caches over.
    cachefiles = ['branch2']
    cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
    cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
    cachefiles += ['tags2']
    cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
    cachefiles += ['hgtagsfnodes1']
    return cachefiles

def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in _cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

        revs = None
        if rev:
            if not srcpeer.capable('lookup'):
                raise error.Abort(_("src repository does not support "
                                    "revision lookup and so doesn't "
                                    "support clone by revision"))
            revs = [srcpeer.lookup(r) for r in rev]
            checkout = revs[0]
        local = destpeer.local()
        if local:
            u = util.url(abspath)
            defaulturl = bytes(u)
            local.ui.setconfig('paths', 'default', defaulturl, 'clone')
            if not stream:
                if pull:
                    stream = False
                else:
                    stream = None
            # internal config: ui.quietbookmarkmove
            overrides = {('ui', 'quietbookmarkmove'): True}
            with local.ui.configoverride(overrides, 'clone'):
                exchange.pull(local, srcpeer, revs,
                              streamclonerequested=stream)
        elif srcrepo:
            exchange.push(srcrepo, destpeer, revs=revs,
                          bookmarks=srcrepo._bookmarks.keys())
        else:
            raise error.Abort(_("clone from remote to remote not supported")
                              )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            fp = destrepo.vfs("hgrc", "wb")
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            fp.write(util.tonativeeol(template % defaulturl))
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

def _showstats(repo, stats, quietempty=False):
    if quietempty and not any(stats):
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats[3] > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

        return ret

853 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
850 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
854 """Branch merge with node, resolving changes. Return true if any
851 """Branch merge with node, resolving changes. Return true if any
855 unresolved conflicts."""
852 unresolved conflicts."""
856 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
853 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
857 labels=labels)
854 labels=labels)
858 _showstats(repo, stats)
855 _showstats(repo, stats)
859 if stats[3]:
856 if stats[3]:
860 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
857 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
861 "or 'hg update -C .' to abandon\n"))
858 "or 'hg update -C .' to abandon\n"))
862 elif remind:
859 elif remind:
863 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
860 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
864 return stats[3] > 0
861 return stats[3] > 0
865
862
866 def _incoming(displaychlist, subreporecurse, ui, repo, source,
863 def _incoming(displaychlist, subreporecurse, ui, repo, source,
867 opts, buffered=False):
864 opts, buffered=False):
868 """
865 """
869 Helper for incoming / gincoming.
866 Helper for incoming / gincoming.
870 displaychlist gets called with
867 displaychlist gets called with
871 (remoterepo, incomingchangesetlist, displayer) parameters,
868 (remoterepo, incomingchangesetlist, displayer) parameters,
872 and is supposed to contain only code that can't be unified.
869 and is supposed to contain only code that can't be unified.
873 """
870 """
874 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
871 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
875 other = peer(repo, opts, source)
872 other = peer(repo, opts, source)
876 ui.status(_('comparing with %s\n') % util.hidepassword(source))
873 ui.status(_('comparing with %s\n') % util.hidepassword(source))
877 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
874 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
878
875
879 if revs:
876 if revs:
880 revs = [other.lookup(rev) for rev in revs]
877 revs = [other.lookup(rev) for rev in revs]
881 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
878 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
882 revs, opts["bundle"], opts["force"])
879 revs, opts["bundle"], opts["force"])
883 try:
880 try:
884 if not chlist:
881 if not chlist:
885 ui.status(_("no changes found\n"))
882 ui.status(_("no changes found\n"))
886 return subreporecurse()
883 return subreporecurse()
887 ui.pager('incoming')
884 ui.pager('incoming')
888 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
885 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
889 displaychlist(other, chlist, displayer)
886 displaychlist(other, chlist, displayer)
890 displayer.close()
887 displayer.close()
891 finally:
888 finally:
892 cleanupfn()
889 cleanupfn()
893 subreporecurse()
890 subreporecurse()
894 return 0 # exit code is zero since we found incoming changes
891 return 0 # exit code is zero since we found incoming changes
895
892
def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

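# Both incoming() above and outgoing() below follow the usual command
# convention: they return 0 when changesets were found and 1 when there was
# nothing to report (the subrepo helpers start from 1 and lower it to each
# subrepository's result via min()).
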
def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

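# Illustrative only: callers consume _outgoing() roughly the way outgoing()
# does below,
#
#     o, other = _outgoing(ui, repo, dest, opts)
#     if not o:
#         ...  # nothing to push or list; report and return 1
#
# where 'o' is the list of missing changelog nodes and 'other' is the peer.
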
def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = cmdutil.loglimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

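# Sketch of the intended use (a simplification, not a verbatim caller): peer
# construction in this module builds its ui roughly as
#
#     rui = remoteui(repo_or_ui, opts)   # repo-local settings dropped,
#                                        # ssh/auth/proxy settings copied
#
# and hands 'rui' to the peer class for the target URL, so the remote side
# never sees configuration that only makes sense for the local repository.
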
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

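# Each entry names a repo attribute ('spath' for the store, 'path' for the
# .hg directory) and a file underneath it; cachedlocalrepo below stats each
# one and records (mtime, size) pairs, producing a state shaped like
#
#     ((1514764800.0, 12345), (1514764800.0, 67), ...)
#
# The numbers are only an illustration of the shape, not real data.
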
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

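    # Illustrative caller pattern (an assumption; long-lived servers that
    # keep repositories around, such as the hgweb machinery, use a cache of
    # this kind):
    #
    #     repo, fresh = cached.fetch()
    #     if fresh:
    #         ...  # any state derived from the previous repo object is stale
    #
    # fetch() never mutates a previously returned repo object; it only swaps
    # the instance held by this cache.
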
    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
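
    # Note: when a file of interest is missing, _repostate() falls back to
    # stat() on its containing directory, so the cache is still invalidated
    # when e.g. a bookmarks file or obsstore first appears. copy()
    # deliberately carries the old _state and mtime over, so the copy will
    # refresh itself on its next fetch() under the same conditions as the
    # original would have.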