share: move the implementation of 'unshare' to the 'hg' module...
Matt Harbison
r34879:9f7ecc5b default
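
This change lifts the body of the share extension's 'unshare' command into a new hg.unshare() helper in mercurial/hg.py, leaving the extension command as a thin wrapper around it. A minimal sketch of how third-party extension code could call the relocated helper after this change (the 'myunshare' command is hypothetical and not part of the commit):

    from mercurial.i18n import _
    from mercurial import error, hg, registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command('myunshare', [], '')
    def myunshare(ui, repo):
        """convert this shared repository to a normal one"""
        if not repo.shared():
            raise error.Abort(_("this is not a shared repo"))
        # delegate to the implementation that now lives in mercurial/hg.py
        hg.unshare(ui, repo)
        return 0
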
hgext/share.py
@@ -1,232 +1,211 @@
1 1 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 '''share a common history between several working directories
7 7
8 8 Automatic Pooled Storage for Clones
9 9 -----------------------------------
10 10
11 11 When this extension is active, :hg:`clone` can be configured to
12 12 automatically share/pool storage across multiple clones. This
13 13 mode effectively converts :hg:`clone` to :hg:`clone` + :hg:`share`.
14 14 The benefit of using this mode is the automatic management of
15 15 store paths and intelligent pooling of related repositories.
16 16
17 17 The following ``share.`` config options influence this feature:
18 18
19 19 ``share.pool``
20 20 Filesystem path where shared repository data will be stored. When
21 21 defined, :hg:`clone` will automatically use shared repository
22 22 storage instead of creating a store inside each clone.
23 23
24 24 ``share.poolnaming``
25 25 How directory names in ``share.pool`` are constructed.
26 26
27 27 "identity" means the name is derived from the first changeset in the
28 28 repository. In this mode, different remotes share storage if their
29 29 root/initial changeset is identical. In this mode, the local shared
30 30 repository is an aggregate of all encountered remote repositories.
31 31
32 32 "remote" means the name is derived from the source repository's
33 33 path or URL. In this mode, storage is only shared if the path or URL
34 34 requested in the :hg:`clone` command matches exactly to a repository
35 35 that was cloned before.
36 36
37 37 The default naming mode is "identity."
38 38 '''
39 39
40 40 from __future__ import absolute_import
41 41
42 42 import errno
43 43 from mercurial.i18n import _
44 44 from mercurial import (
45 45 bookmarks,
46 46 commands,
47 47 error,
48 48 extensions,
49 49 hg,
50 50 registrar,
51 51 txnutil,
52 52 util,
53 53 )
54 54
55 55 repository = hg.repository
56 56 parseurl = hg.parseurl
57 57
58 58 cmdtable = {}
59 59 command = registrar.command(cmdtable)
60 60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 62 # be specifying the version(s) of Mercurial they are tested with, or
63 63 # leave the attribute unspecified.
64 64 testedwith = 'ships-with-hg-core'
65 65
66 66 configtable = {}
67 67 configitem = registrar.configitem(configtable)
68 68
69 69 configitem('share', 'pool',
70 70 default=None,
71 71 )
72 72 configitem('share', 'poolnaming',
73 73 default='identity',
74 74 )
75 75
76 76 @command('share',
77 77 [('U', 'noupdate', None, _('do not create a working directory')),
78 78 ('B', 'bookmarks', None, _('also share bookmarks')),
79 79 ('', 'relative', None, _('point to source using a relative path '
80 80 '(EXPERIMENTAL)')),
81 81 ],
82 82 _('[-U] [-B] SOURCE [DEST]'),
83 83 norepo=True)
84 84 def share(ui, source, dest=None, noupdate=False, bookmarks=False,
85 85 relative=False):
86 86 """create a new shared repository
87 87
88 88 Initialize a new repository and working directory that shares its
89 89 history (and optionally bookmarks) with another repository.
90 90
91 91 .. note::
92 92
93 93 using rollback or extensions that destroy/modify history (mq,
94 94 rebase, etc.) can cause considerable confusion with shared
95 95 clones. In particular, if two shared clones are both updated to
96 96 the same changeset, and one of them destroys that changeset
97 97 with rollback, the other clone will suddenly stop working: all
98 98 operations will fail with "abort: working directory has unknown
99 99 parent". The only known workaround is to use debugsetparents on
100 100 the broken clone to reset it to a changeset that still exists.
101 101 """
102 102
103 103 hg.share(ui, source, dest=dest, update=not noupdate,
104 104 bookmarks=bookmarks, relative=relative)
105 105 return 0
106 106
107 107 @command('unshare', [], '')
108 108 def unshare(ui, repo):
109 109 """convert a shared repository to a normal one
110 110
111 111 Copy the store data to the repo and remove the sharedpath data.
112 112 """
113 113
114 114 if not repo.shared():
115 115 raise error.Abort(_("this is not a shared repo"))
116 116
117 destlock = lock = None
118 lock = repo.lock()
119 try:
120 # we use locks here because if we race with commit, we
121 # can end up with extra data in the cloned revlogs that's
122 # not pointed to by changesets, thus causing verify to
123 # fail
124
125 destlock = hg.copystore(ui, repo, repo.path)
126
127 sharefile = repo.vfs.join('sharedpath')
128 util.rename(sharefile, sharefile + '.old')
129
130 repo.requirements.discard('shared')
131 repo.requirements.discard('relshared')
132 repo._writerequirements()
133 finally:
134 destlock and destlock.release()
135 lock and lock.release()
136
137 # update store, spath, svfs and sjoin of repo
138 repo.unfiltered().__init__(repo.baseui, repo.root)
117 hg.unshare(ui, repo)
139 118
140 119 # Wrap clone command to pass auto share options.
141 120 def clone(orig, ui, source, *args, **opts):
142 121 pool = ui.config('share', 'pool')
143 122 if pool:
144 123 pool = util.expandpath(pool)
145 124
146 125 opts[r'shareopts'] = {
147 126 'pool': pool,
148 127 'mode': ui.config('share', 'poolnaming'),
149 128 }
150 129
151 130 return orig(ui, source, *args, **opts)
152 131
153 132 def extsetup(ui):
154 133 extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
155 134 extensions.wrapfunction(bookmarks.bmstore, '_recordchange', recordchange)
156 135 extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
157 136 extensions.wrapcommand(commands.table, 'clone', clone)
158 137
159 138 def _hassharedbookmarks(repo):
160 139 """Returns whether this repo has shared bookmarks"""
161 140 try:
162 141 shared = repo.vfs.read('shared').splitlines()
163 142 except IOError as inst:
164 143 if inst.errno != errno.ENOENT:
165 144 raise
166 145 return False
167 146 return hg.sharedbookmarks in shared
168 147
169 148 def _getsrcrepo(repo):
170 149 """
171 150 Returns the source repository object for a given shared repository.
172 151 If repo is not a shared repository, return None.
173 152 """
174 153 if repo.sharedpath == repo.path:
175 154 return None
176 155
177 156 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
178 157 return repo.srcrepo
179 158
180 159 # the sharedpath always ends in the .hg; we want the path to the repo
181 160 source = repo.vfs.split(repo.sharedpath)[0]
182 161 srcurl, branches = parseurl(source)
183 162 srcrepo = repository(repo.ui, srcurl)
184 163 repo.srcrepo = srcrepo
185 164 return srcrepo
186 165
187 166 def getbkfile(orig, repo):
188 167 if _hassharedbookmarks(repo):
189 168 srcrepo = _getsrcrepo(repo)
190 169 if srcrepo is not None:
191 170 # just orig(srcrepo) doesn't work as expected, because
192 171 # HG_PENDING refers repo.root.
193 172 try:
194 173 fp, pending = txnutil.trypending(repo.root, repo.vfs,
195 174 'bookmarks')
196 175 if pending:
197 176 # only in this case, bookmark information in repo
198 177 # is up-to-date.
199 178 return fp
200 179 fp.close()
201 180 except IOError as inst:
202 181 if inst.errno != errno.ENOENT:
203 182 raise
204 183
205 184 # otherwise, we should read bookmarks from srcrepo,
206 185 # because .hg/bookmarks in srcrepo might be already
207 186 # changed via another sharing repo
208 187 repo = srcrepo
209 188
210 189 # TODO: Pending changes in repo are still invisible in
211 190 # srcrepo, because bookmarks.pending is written only into repo.
212 191 # See also https://www.mercurial-scm.org/wiki/SharedRepository
213 192 return orig(repo)
214 193
215 194 def recordchange(orig, self, tr):
216 195 # Continue with write to local bookmarks file as usual
217 196 orig(self, tr)
218 197
219 198 if _hassharedbookmarks(self._repo):
220 199 srcrepo = _getsrcrepo(self._repo)
221 200 if srcrepo is not None:
222 201 category = 'share-bookmarks'
223 202 tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
224 203
225 204 def writerepo(orig, self, repo):
226 205 # First write local bookmarks file in case we ever unshare
227 206 orig(self, repo)
228 207
229 208 if _hassharedbookmarks(self._repo):
230 209 srcrepo = _getsrcrepo(self._repo)
231 210 if srcrepo is not None:
232 211 orig(self, srcrepo)
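
The pooled-storage behaviour documented in the extension docstring above is driven by the share.pool and share.poolnaming settings in the [share] section of an hgrc; the wrapped clone command forwards them down to hg.clone() as a 'shareopts' dictionary. A minimal programmatic sketch of that same path (the pool directory and source URL are illustrative):

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()
    # equivalent of share.pool / share.poolnaming in an hgrc [share] section
    shareopts = {'pool': '/srv/hg-pool', 'mode': 'identity'}
    srcpeer, destpeer = hg.clone(u, {}, 'https://example.org/repo',
                                 dest='repo-clone', shareopts=shareopts)

Passing an empty dict for peeropts simply relies on the configuration already loaded into the ui object. As the docstring notes, "identity" pools clones that share the same root changeset, while "remote" keys the pool entry on the exact source path or URL.
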
mercurial/hg.py
@@ -1,1065 +1,1094 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18
19 19 from . import (
20 20 bookmarks,
21 21 bundlerepo,
22 22 cmdutil,
23 23 destutil,
24 24 discovery,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 httppeer,
29 29 localrepo,
30 30 lock,
31 31 merge as mergemod,
32 32 node,
33 33 phases,
34 34 repoview,
35 35 scmutil,
36 36 sshpeer,
37 37 statichttprepo,
38 38 ui as uimod,
39 39 unionrepo,
40 40 url,
41 41 util,
42 42 verify as verifymod,
43 43 vfs as vfsmod,
44 44 )
45 45
46 46 release = lock.release
47 47
48 48 # shared features
49 49 sharedbookmarks = 'bookmarks'
50 50
51 51 def _local(path):
52 52 path = util.expandpath(util.urllocalpath(path))
53 53 return (os.path.isfile(path) and bundlerepo or localrepo)
54 54
55 55 def addbranchrevs(lrepo, other, branches, revs):
56 56 peer = other.peer() # a courtesy to callers using a localrepo for other
57 57 hashbranch, branches = branches
58 58 if not hashbranch and not branches:
59 59 x = revs or None
60 60 if util.safehasattr(revs, 'first'):
61 61 y = revs.first()
62 62 elif revs:
63 63 y = revs[0]
64 64 else:
65 65 y = None
66 66 return x, y
67 67 if revs:
68 68 revs = list(revs)
69 69 else:
70 70 revs = []
71 71
72 72 if not peer.capable('branchmap'):
73 73 if branches:
74 74 raise error.Abort(_("remote branch lookup not supported"))
75 75 revs.append(hashbranch)
76 76 return revs, revs[0]
77 77 branchmap = peer.branchmap()
78 78
79 79 def primary(branch):
80 80 if branch == '.':
81 81 if not lrepo:
82 82 raise error.Abort(_("dirstate branch not accessible"))
83 83 branch = lrepo.dirstate.branch()
84 84 if branch in branchmap:
85 85 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
86 86 return True
87 87 else:
88 88 return False
89 89
90 90 for branch in branches:
91 91 if not primary(branch):
92 92 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
93 93 if hashbranch:
94 94 if not primary(hashbranch):
95 95 revs.append(hashbranch)
96 96 return revs, revs[0]
97 97
98 98 def parseurl(path, branches=None):
99 99 '''parse url#branch, returning (url, (branch, branches))'''
100 100
101 101 u = util.url(path)
102 102 branch = None
103 103 if u.fragment:
104 104 branch = u.fragment
105 105 u.fragment = None
106 106 return bytes(u), (branch, branches or [])
107 107
108 108 schemes = {
109 109 'bundle': bundlerepo,
110 110 'union': unionrepo,
111 111 'file': _local,
112 112 'http': httppeer,
113 113 'https': httppeer,
114 114 'ssh': sshpeer,
115 115 'static-http': statichttprepo,
116 116 }
117 117
118 118 def _peerlookup(path):
119 119 u = util.url(path)
120 120 scheme = u.scheme or 'file'
121 121 thing = schemes.get(scheme) or schemes['file']
122 122 try:
123 123 return thing(path)
124 124 except TypeError:
125 125 # we can't test callable(thing) because 'thing' can be an unloaded
126 126 # module that implements __call__
127 127 if not util.safehasattr(thing, 'instance'):
128 128 raise
129 129 return thing
130 130
131 131 def islocal(repo):
132 132 '''return true if repo (or path pointing to repo) is local'''
133 133 if isinstance(repo, bytes):
134 134 try:
135 135 return _peerlookup(repo).islocal(repo)
136 136 except AttributeError:
137 137 return False
138 138 return repo.local()
139 139
140 140 def openpath(ui, path):
141 141 '''open path with open if local, url.open if remote'''
142 142 pathurl = util.url(path, parsequery=False, parsefragment=False)
143 143 if pathurl.islocal():
144 144 return util.posixfile(pathurl.localpath(), 'rb')
145 145 else:
146 146 return url.open(ui, path)
147 147
148 148 # a list of (ui, repo) functions called for wire peer initialization
149 149 wirepeersetupfuncs = []
150 150
151 151 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
152 152 """return a repository object for the specified path"""
153 153 obj = _peerlookup(path).instance(ui, path, create)
154 154 ui = getattr(obj, "ui", ui)
155 155 for f in presetupfuncs or []:
156 156 f(ui, obj)
157 157 for name, module in extensions.extensions(ui):
158 158 hook = getattr(module, 'reposetup', None)
159 159 if hook:
160 160 hook(ui, obj)
161 161 if not obj.local():
162 162 for f in wirepeersetupfuncs:
163 163 f(ui, obj)
164 164 return obj
165 165
166 166 def repository(ui, path='', create=False, presetupfuncs=None):
167 167 """return a repository object for the specified path"""
168 168 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
169 169 repo = peer.local()
170 170 if not repo:
171 171 raise error.Abort(_("repository '%s' is not local") %
172 172 (path or peer.url()))
173 173 return repo.filtered('visible')
174 174
175 175 def peer(uiorrepo, opts, path, create=False):
176 176 '''return a repository peer for the specified path'''
177 177 rui = remoteui(uiorrepo, opts)
178 178 return _peerorrepo(rui, path, create).peer()
179 179
180 180 def defaultdest(source):
181 181 '''return default destination of clone if none is given
182 182
183 183 >>> defaultdest(b'foo')
184 184 'foo'
185 185 >>> defaultdest(b'/foo/bar')
186 186 'bar'
187 187 >>> defaultdest(b'/')
188 188 ''
189 189 >>> defaultdest(b'')
190 190 ''
191 191 >>> defaultdest(b'http://example.org/')
192 192 ''
193 193 >>> defaultdest(b'http://example.org/foo/')
194 194 'foo'
195 195 '''
196 196 path = util.url(source).path
197 197 if not path:
198 198 return ''
199 199 return os.path.basename(os.path.normpath(path))
200 200
201 201 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
202 202 relative=False):
203 203 '''create a shared repository'''
204 204
205 205 if not islocal(source):
206 206 raise error.Abort(_('can only share local repositories'))
207 207
208 208 if not dest:
209 209 dest = defaultdest(source)
210 210 else:
211 211 dest = ui.expandpath(dest)
212 212
213 213 if isinstance(source, str):
214 214 origsource = ui.expandpath(source)
215 215 source, branches = parseurl(origsource)
216 216 srcrepo = repository(ui, source)
217 217 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
218 218 else:
219 219 srcrepo = source.local()
220 220 origsource = source = srcrepo.url()
221 221 checkout = None
222 222
223 223 sharedpath = srcrepo.sharedpath # if our source is already sharing
224 224
225 225 destwvfs = vfsmod.vfs(dest, realpath=True)
226 226 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
227 227
228 228 if destvfs.lexists():
229 229 raise error.Abort(_('destination already exists'))
230 230
231 231 if not destwvfs.isdir():
232 232 destwvfs.mkdir()
233 233 destvfs.makedir()
234 234
235 235 requirements = ''
236 236 try:
237 237 requirements = srcrepo.vfs.read('requires')
238 238 except IOError as inst:
239 239 if inst.errno != errno.ENOENT:
240 240 raise
241 241
242 242 if relative:
243 243 try:
244 244 sharedpath = os.path.relpath(sharedpath, destvfs.base)
245 245 requirements += 'relshared\n'
246 246 except IOError as e:
247 247 raise error.Abort(_('cannot calculate relative path'),
248 248 hint=str(e))
249 249 else:
250 250 requirements += 'shared\n'
251 251
252 252 destvfs.write('requires', requirements)
253 253 destvfs.write('sharedpath', sharedpath)
254 254
255 255 r = repository(ui, destwvfs.base)
256 256 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
257 257 _postshareupdate(r, update, checkout=checkout)
258 258 return r
259 259
260 def unshare(ui, repo):
261 """convert a shared repository to a normal one
262
263 Copy the store data to the repo and remove the sharedpath data.
264 """
265
266 destlock = lock = None
267 lock = repo.lock()
268 try:
269 # we use locks here because if we race with commit, we
270 # can end up with extra data in the cloned revlogs that's
271 # not pointed to by changesets, thus causing verify to
272 # fail
273
274 destlock = copystore(ui, repo, repo.path)
275
276 sharefile = repo.vfs.join('sharedpath')
277 util.rename(sharefile, sharefile + '.old')
278
279 repo.requirements.discard('shared')
280 repo.requirements.discard('relshared')
281 repo._writerequirements()
282 finally:
283 destlock and destlock.release()
284 lock and lock.release()
285
286 # update store, spath, svfs and sjoin of repo
287 repo.unfiltered().__init__(repo.baseui, repo.root)
288
260 289 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
261 290 """Called after a new shared repo is created.
262 291
263 292 The new repo only has a requirements file and pointer to the source.
264 293 This function configures additional shared data.
265 294
266 295 Extensions can wrap this function and write additional entries to
267 296 destrepo/.hg/shared to indicate additional pieces of data to be shared.
268 297 """
269 298 default = defaultpath or sourcerepo.ui.config('paths', 'default')
270 299 if default:
271 300 fp = destrepo.vfs("hgrc", "w", text=True)
272 301 fp.write("[paths]\n")
273 302 fp.write("default = %s\n" % default)
274 303 fp.close()
275 304
276 305 with destrepo.wlock():
277 306 if bookmarks:
278 307 fp = destrepo.vfs('shared', 'w')
279 308 fp.write(sharedbookmarks + '\n')
280 309 fp.close()
281 310
282 311 def _postshareupdate(repo, update, checkout=None):
283 312 """Maybe perform a working directory update after a shared repo is created.
284 313
285 314 ``update`` can be a boolean or a revision to update to.
286 315 """
287 316 if not update:
288 317 return
289 318
290 319 repo.ui.status(_("updating working directory\n"))
291 320 if update is not True:
292 321 checkout = update
293 322 for test in (checkout, 'default', 'tip'):
294 323 if test is None:
295 324 continue
296 325 try:
297 326 uprev = repo.lookup(test)
298 327 break
299 328 except error.RepoLookupError:
300 329 continue
301 330 _update(repo, uprev)
302 331
303 332 def copystore(ui, srcrepo, destpath):
304 333 '''copy files from store of srcrepo in destpath
305 334
306 335 returns destlock
307 336 '''
308 337 destlock = None
309 338 try:
310 339 hardlink = None
311 340 num = 0
312 341 closetopic = [None]
313 342 def prog(topic, pos):
314 343 if pos is None:
315 344 closetopic[0] = topic
316 345 else:
317 346 ui.progress(topic, pos + num)
318 347 srcpublishing = srcrepo.publishing()
319 348 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
320 349 dstvfs = vfsmod.vfs(destpath)
321 350 for f in srcrepo.store.copylist():
322 351 if srcpublishing and f.endswith('phaseroots'):
323 352 continue
324 353 dstbase = os.path.dirname(f)
325 354 if dstbase and not dstvfs.exists(dstbase):
326 355 dstvfs.mkdir(dstbase)
327 356 if srcvfs.exists(f):
328 357 if f.endswith('data'):
329 358 # 'dstbase' may be empty (e.g. revlog format 0)
330 359 lockfile = os.path.join(dstbase, "lock")
331 360 # lock to avoid premature writing to the target
332 361 destlock = lock.lock(dstvfs, lockfile)
333 362 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
334 363 hardlink, progress=prog)
335 364 num += n
336 365 if hardlink:
337 366 ui.debug("linked %d files\n" % num)
338 367 if closetopic[0]:
339 368 ui.progress(closetopic[0], None)
340 369 else:
341 370 ui.debug("copied %d files\n" % num)
342 371 if closetopic[0]:
343 372 ui.progress(closetopic[0], None)
344 373 return destlock
345 374 except: # re-raises
346 375 release(destlock)
347 376 raise
348 377
349 378 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
350 379 rev=None, update=True, stream=False):
351 380 """Perform a clone using a shared repo.
352 381
353 382 The store for the repository will be located at <sharepath>/.hg. The
354 383 specified revisions will be cloned or pulled from "source". A shared repo
355 384 will be created at "dest" and a working copy will be created if "update" is
356 385 True.
357 386 """
358 387 revs = None
359 388 if rev:
360 389 if not srcpeer.capable('lookup'):
361 390 raise error.Abort(_("src repository does not support "
362 391 "revision lookup and so doesn't "
363 392 "support clone by revision"))
364 393 revs = [srcpeer.lookup(r) for r in rev]
365 394
366 395 # Obtain a lock before checking for or cloning the pooled repo otherwise
367 396 # 2 clients may race creating or populating it.
368 397 pooldir = os.path.dirname(sharepath)
369 398 # lock class requires the directory to exist.
370 399 try:
371 400 util.makedir(pooldir, False)
372 401 except OSError as e:
373 402 if e.errno != errno.EEXIST:
374 403 raise
375 404
376 405 poolvfs = vfsmod.vfs(pooldir)
377 406 basename = os.path.basename(sharepath)
378 407
379 408 with lock.lock(poolvfs, '%s.lock' % basename):
380 409 if os.path.exists(sharepath):
381 410 ui.status(_('(sharing from existing pooled repository %s)\n') %
382 411 basename)
383 412 else:
384 413 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
385 414 # Always use pull mode because hardlinks in share mode don't work
386 415 # well. Never update because working copies aren't necessary in
387 416 # share mode.
388 417 clone(ui, peeropts, source, dest=sharepath, pull=True,
389 418 rev=rev, update=False, stream=stream)
390 419
391 420 # Resolve the value to put in [paths] section for the source.
392 421 if islocal(source):
393 422 defaultpath = os.path.abspath(util.urllocalpath(source))
394 423 else:
395 424 defaultpath = source
396 425
397 426 sharerepo = repository(ui, path=sharepath)
398 427 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
399 428 defaultpath=defaultpath)
400 429
401 430 # We need to perform a pull against the dest repo to fetch bookmarks
402 431 # and other non-store data that isn't shared by default. In the case of
403 432 # non-existing shared repo, this means we pull from the remote twice. This
404 433 # is a bit weird. But at the time it was implemented, there wasn't an easy
405 434 # way to pull just non-changegroup data.
406 435 destrepo = repository(ui, path=dest)
407 436 exchange.pull(destrepo, srcpeer, heads=revs)
408 437
409 438 _postshareupdate(destrepo, update)
410 439
411 440 return srcpeer, peer(ui, peeropts, dest)
412 441
413 442 # Recomputing branch cache might be slow on big repos,
414 443 # so just copy it
415 444 def _copycache(srcrepo, dstcachedir, fname):
416 445 """copy a cache from srcrepo to destcachedir (if it exists)"""
417 446 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
418 447 dstbranchcache = os.path.join(dstcachedir, fname)
419 448 if os.path.exists(srcbranchcache):
420 449 if not os.path.exists(dstcachedir):
421 450 os.mkdir(dstcachedir)
422 451 util.copyfile(srcbranchcache, dstbranchcache)
423 452
424 453 def _cachetocopy(srcrepo):
425 454 """return the list of cache file valuable to copy during a clone"""
426 455 # In local clones we're copying all nodes, not just served
427 456 # ones. Therefore copy all branch caches over.
428 457 cachefiles = ['branch2']
429 458 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
430 459 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
431 460 cachefiles += ['tags2']
432 461 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
433 462 cachefiles += ['hgtagsfnodes1']
434 463 return cachefiles
435 464
436 465 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
437 466 update=True, stream=False, branch=None, shareopts=None):
438 467 """Make a copy of an existing repository.
439 468
440 469 Create a copy of an existing repository in a new directory. The
441 470 source and destination are URLs, as passed to the repository
442 471 function. Returns a pair of repository peers, the source and
443 472 newly created destination.
444 473
445 474 The location of the source is added to the new repository's
446 475 .hg/hgrc file, as the default to be used for future pulls and
447 476 pushes.
448 477
449 478 If an exception is raised, the partly cloned/updated destination
450 479 repository will be deleted.
451 480
452 481 Arguments:
453 482
454 483 source: repository object or URL
455 484
456 485 dest: URL of destination repository to create (defaults to base
457 486 name of source repository)
458 487
459 488 pull: always pull from source repository, even in local case or if the
460 489 server prefers streaming
461 490
462 491 stream: stream raw data uncompressed from repository (fast over
463 492 LAN, slow over WAN)
464 493
465 494 rev: revision to clone up to (implies pull=True)
466 495
467 496 update: update working directory after clone completes, if
468 497 destination is local repository (True means update to default rev,
469 498 anything else is treated as a revision)
470 499
471 500 branch: branches to clone
472 501
473 502 shareopts: dict of options to control auto sharing behavior. The "pool" key
474 503 activates auto sharing mode and defines the directory for stores. The
475 504 "mode" key determines how to construct the directory name of the shared
476 505 repository. "identity" means the name is derived from the node of the first
477 506 changeset in the repository. "remote" means the name is derived from the
478 507 remote's path/URL. Defaults to "identity."
479 508 """
480 509
481 510 if isinstance(source, bytes):
482 511 origsource = ui.expandpath(source)
483 512 source, branch = parseurl(origsource, branch)
484 513 srcpeer = peer(ui, peeropts, source)
485 514 else:
486 515 srcpeer = source.peer() # in case we were called with a localrepo
487 516 branch = (None, branch or [])
488 517 origsource = source = srcpeer.url()
489 518 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
490 519
491 520 if dest is None:
492 521 dest = defaultdest(source)
493 522 if dest:
494 523 ui.status(_("destination directory: %s\n") % dest)
495 524 else:
496 525 dest = ui.expandpath(dest)
497 526
498 527 dest = util.urllocalpath(dest)
499 528 source = util.urllocalpath(source)
500 529
501 530 if not dest:
502 531 raise error.Abort(_("empty destination path is not valid"))
503 532
504 533 destvfs = vfsmod.vfs(dest, expandpath=True)
505 534 if destvfs.lexists():
506 535 if not destvfs.isdir():
507 536 raise error.Abort(_("destination '%s' already exists") % dest)
508 537 elif destvfs.listdir():
509 538 raise error.Abort(_("destination '%s' is not empty") % dest)
510 539
511 540 shareopts = shareopts or {}
512 541 sharepool = shareopts.get('pool')
513 542 sharenamemode = shareopts.get('mode')
514 543 if sharepool and islocal(dest):
515 544 sharepath = None
516 545 if sharenamemode == 'identity':
517 546 # Resolve the name from the initial changeset in the remote
518 547 # repository. This returns nullid when the remote is empty. It
519 548 # raises RepoLookupError if revision 0 is filtered or otherwise
520 549 # not available. If we fail to resolve, sharing is not enabled.
521 550 try:
522 551 rootnode = srcpeer.lookup('0')
523 552 if rootnode != node.nullid:
524 553 sharepath = os.path.join(sharepool, node.hex(rootnode))
525 554 else:
526 555 ui.status(_('(not using pooled storage: '
527 556 'remote appears to be empty)\n'))
528 557 except error.RepoLookupError:
529 558 ui.status(_('(not using pooled storage: '
530 559 'unable to resolve identity of remote)\n'))
531 560 elif sharenamemode == 'remote':
532 561 sharepath = os.path.join(
533 562 sharepool, hashlib.sha1(source).hexdigest())
534 563 else:
535 564 raise error.Abort(_('unknown share naming mode: %s') %
536 565 sharenamemode)
537 566
538 567 if sharepath:
539 568 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
540 569 dest, pull=pull, rev=rev, update=update,
541 570 stream=stream)
542 571
543 572 srclock = destlock = cleandir = None
544 573 srcrepo = srcpeer.local()
545 574 try:
546 575 abspath = origsource
547 576 if islocal(origsource):
548 577 abspath = os.path.abspath(util.urllocalpath(origsource))
549 578
550 579 if islocal(dest):
551 580 cleandir = dest
552 581
553 582 copy = False
554 583 if (srcrepo and srcrepo.cancopy() and islocal(dest)
555 584 and not phases.hassecret(srcrepo)):
556 585 copy = not pull and not rev
557 586
558 587 if copy:
559 588 try:
560 589 # we use a lock here because if we race with commit, we
561 590 # can end up with extra data in the cloned revlogs that's
562 591 # not pointed to by changesets, thus causing verify to
563 592 # fail
564 593 srclock = srcrepo.lock(wait=False)
565 594 except error.LockError:
566 595 copy = False
567 596
568 597 if copy:
569 598 srcrepo.hook('preoutgoing', throw=True, source='clone')
570 599 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
571 600 if not os.path.exists(dest):
572 601 os.mkdir(dest)
573 602 else:
574 603 # only clean up directories we create ourselves
575 604 cleandir = hgdir
576 605 try:
577 606 destpath = hgdir
578 607 util.makedir(destpath, notindexed=True)
579 608 except OSError as inst:
580 609 if inst.errno == errno.EEXIST:
581 610 cleandir = None
582 611 raise error.Abort(_("destination '%s' already exists")
583 612 % dest)
584 613 raise
585 614
586 615 destlock = copystore(ui, srcrepo, destpath)
587 616 # copy bookmarks over
588 617 srcbookmarks = srcrepo.vfs.join('bookmarks')
589 618 dstbookmarks = os.path.join(destpath, 'bookmarks')
590 619 if os.path.exists(srcbookmarks):
591 620 util.copyfile(srcbookmarks, dstbookmarks)
592 621
593 622 dstcachedir = os.path.join(destpath, 'cache')
594 623 for cache in _cachetocopy(srcrepo):
595 624 _copycache(srcrepo, dstcachedir, cache)
596 625
597 626 # we need to re-init the repo after manually copying the data
598 627 # into it
599 628 destpeer = peer(srcrepo, peeropts, dest)
600 629 srcrepo.hook('outgoing', source='clone',
601 630 node=node.hex(node.nullid))
602 631 else:
603 632 try:
604 633 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
605 634 # only pass ui when no srcrepo
606 635 except OSError as inst:
607 636 if inst.errno == errno.EEXIST:
608 637 cleandir = None
609 638 raise error.Abort(_("destination '%s' already exists")
610 639 % dest)
611 640 raise
612 641
613 642 revs = None
614 643 if rev:
615 644 if not srcpeer.capable('lookup'):
616 645 raise error.Abort(_("src repository does not support "
617 646 "revision lookup and so doesn't "
618 647 "support clone by revision"))
619 648 revs = [srcpeer.lookup(r) for r in rev]
620 649 checkout = revs[0]
621 650 local = destpeer.local()
622 651 if local:
623 652 if not stream:
624 653 if pull:
625 654 stream = False
626 655 else:
627 656 stream = None
628 657 # internal config: ui.quietbookmarkmove
629 658 overrides = {('ui', 'quietbookmarkmove'): True}
630 659 with local.ui.configoverride(overrides, 'clone'):
631 660 exchange.pull(local, srcpeer, revs,
632 661 streamclonerequested=stream)
633 662 elif srcrepo:
634 663 exchange.push(srcrepo, destpeer, revs=revs,
635 664 bookmarks=srcrepo._bookmarks.keys())
636 665 else:
637 666 raise error.Abort(_("clone from remote to remote not supported")
638 667 )
639 668
640 669 cleandir = None
641 670
642 671 destrepo = destpeer.local()
643 672 if destrepo:
644 673 template = uimod.samplehgrcs['cloned']
645 674 fp = destrepo.vfs("hgrc", "wb")
646 675 u = util.url(abspath)
647 676 u.passwd = None
648 677 defaulturl = bytes(u)
649 678 fp.write(util.tonativeeol(template % defaulturl))
650 679 fp.close()
651 680
652 681 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
653 682
654 683 if update:
655 684 if update is not True:
656 685 checkout = srcpeer.lookup(update)
657 686 uprev = None
658 687 status = None
659 688 if checkout is not None:
660 689 try:
661 690 uprev = destrepo.lookup(checkout)
662 691 except error.RepoLookupError:
663 692 if update is not True:
664 693 try:
665 694 uprev = destrepo.lookup(update)
666 695 except error.RepoLookupError:
667 696 pass
668 697 if uprev is None:
669 698 try:
670 699 uprev = destrepo._bookmarks['@']
671 700 update = '@'
672 701 bn = destrepo[uprev].branch()
673 702 if bn == 'default':
674 703 status = _("updating to bookmark @\n")
675 704 else:
676 705 status = (_("updating to bookmark @ on branch %s\n")
677 706 % bn)
678 707 except KeyError:
679 708 try:
680 709 uprev = destrepo.branchtip('default')
681 710 except error.RepoLookupError:
682 711 uprev = destrepo.lookup('tip')
683 712 if not status:
684 713 bn = destrepo[uprev].branch()
685 714 status = _("updating to branch %s\n") % bn
686 715 destrepo.ui.status(status)
687 716 _update(destrepo, uprev)
688 717 if update in destrepo._bookmarks:
689 718 bookmarks.activate(destrepo, update)
690 719 finally:
691 720 release(srclock, destlock)
692 721 if cleandir is not None:
693 722 shutil.rmtree(cleandir, True)
694 723 if srcpeer is not None:
695 724 srcpeer.close()
696 725 return srcpeer, destpeer
697 726
698 727 def _showstats(repo, stats, quietempty=False):
699 728 if quietempty and not any(stats):
700 729 return
701 730 repo.ui.status(_("%d files updated, %d files merged, "
702 731 "%d files removed, %d files unresolved\n") % stats)
703 732
704 733 def updaterepo(repo, node, overwrite, updatecheck=None):
705 734 """Update the working directory to node.
706 735
707 736 When overwrite is set, changes are clobbered, merged else
708 737
709 738 returns stats (see pydoc mercurial.merge.applyupdates)"""
710 739 return mergemod.update(repo, node, False, overwrite,
711 740 labels=['working copy', 'destination'],
712 741 updatecheck=updatecheck)
713 742
714 743 def update(repo, node, quietempty=False, updatecheck=None):
715 744 """update the working directory to node"""
716 745 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
717 746 _showstats(repo, stats, quietempty)
718 747 if stats[3]:
719 748 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
720 749 return stats[3] > 0
721 750
722 751 # naming conflict in clone()
723 752 _update = update
724 753
725 754 def clean(repo, node, show_stats=True, quietempty=False):
726 755 """forcibly switch the working directory to node, clobbering changes"""
727 756 stats = updaterepo(repo, node, True)
728 757 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
729 758 if show_stats:
730 759 _showstats(repo, stats, quietempty)
731 760 return stats[3] > 0
732 761
733 762 # naming conflict in updatetotally()
734 763 _clean = clean
735 764
736 765 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
737 766 """Update the working directory with extra care for non-file components
738 767
739 768 This takes care of non-file components below:
740 769
741 770 :bookmark: might be advanced or (in)activated
742 771
743 772 This takes arguments below:
744 773
745 774 :checkout: to which revision the working directory is updated
746 775 :brev: a name, which might be a bookmark to be activated after updating
747 776 :clean: whether changes in the working directory can be discarded
748 777 :updatecheck: how to deal with a dirty working directory
749 778
750 779 Valid values for updatecheck are (None => linear):
751 780
752 781 * abort: abort if the working directory is dirty
753 782 * none: don't check (merge working directory changes into destination)
754 783 * linear: check that update is linear before merging working directory
755 784 changes into destination
756 785 * noconflict: check that the update does not result in file merges
757 786
758 787 This returns whether conflict is detected at updating or not.
759 788 """
760 789 if updatecheck is None:
761 790 updatecheck = ui.config('commands', 'update.check')
762 791 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
763 792 # If not configured, or invalid value configured
764 793 updatecheck = 'linear'
765 794 with repo.wlock():
766 795 movemarkfrom = None
767 796 warndest = False
768 797 if checkout is None:
769 798 updata = destutil.destupdate(repo, clean=clean)
770 799 checkout, movemarkfrom, brev = updata
771 800 warndest = True
772 801
773 802 if clean:
774 803 ret = _clean(repo, checkout)
775 804 else:
776 805 if updatecheck == 'abort':
777 806 cmdutil.bailifchanged(repo, merge=False)
778 807 updatecheck = 'none'
779 808 ret = _update(repo, checkout, updatecheck=updatecheck)
780 809
781 810 if not ret and movemarkfrom:
782 811 if movemarkfrom == repo['.'].node():
783 812 pass # no-op update
784 813 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
785 814 b = ui.label(repo._activebookmark, 'bookmarks.active')
786 815 ui.status(_("updating bookmark %s\n") % b)
787 816 else:
788 817 # this can happen with a non-linear update
789 818 b = ui.label(repo._activebookmark, 'bookmarks')
790 819 ui.status(_("(leaving bookmark %s)\n") % b)
791 820 bookmarks.deactivate(repo)
792 821 elif brev in repo._bookmarks:
793 822 if brev != repo._activebookmark:
794 823 b = ui.label(brev, 'bookmarks.active')
795 824 ui.status(_("(activating bookmark %s)\n") % b)
796 825 bookmarks.activate(repo, brev)
797 826 elif brev:
798 827 if repo._activebookmark:
799 828 b = ui.label(repo._activebookmark, 'bookmarks')
800 829 ui.status(_("(leaving bookmark %s)\n") % b)
801 830 bookmarks.deactivate(repo)
802 831
803 832 if warndest:
804 833 destutil.statusotherdests(ui, repo)
805 834
806 835 return ret
807 836
808 837 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
809 838 """Branch merge with node, resolving changes. Return true if any
810 839 unresolved conflicts."""
811 840 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
812 841 labels=labels)
813 842 _showstats(repo, stats)
814 843 if stats[3]:
815 844 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
816 845 "or 'hg update -C .' to abandon\n"))
817 846 elif remind:
818 847 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
819 848 return stats[3] > 0
820 849
821 850 def _incoming(displaychlist, subreporecurse, ui, repo, source,
822 851 opts, buffered=False):
823 852 """
824 853 Helper for incoming / gincoming.
825 854 displaychlist gets called with
826 855 (remoterepo, incomingchangesetlist, displayer) parameters,
827 856 and is supposed to contain only code that can't be unified.
828 857 """
829 858 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
830 859 other = peer(repo, opts, source)
831 860 ui.status(_('comparing with %s\n') % util.hidepassword(source))
832 861 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
833 862
834 863 if revs:
835 864 revs = [other.lookup(rev) for rev in revs]
836 865 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
837 866 revs, opts["bundle"], opts["force"])
838 867 try:
839 868 if not chlist:
840 869 ui.status(_("no changes found\n"))
841 870 return subreporecurse()
842 871 ui.pager('incoming')
843 872 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
844 873 displaychlist(other, chlist, displayer)
845 874 displayer.close()
846 875 finally:
847 876 cleanupfn()
848 877 subreporecurse()
849 878 return 0 # exit code is zero since we found incoming changes
850 879
851 880 def incoming(ui, repo, source, opts):
852 881 def subreporecurse():
853 882 ret = 1
854 883 if opts.get('subrepos'):
855 884 ctx = repo[None]
856 885 for subpath in sorted(ctx.substate):
857 886 sub = ctx.sub(subpath)
858 887 ret = min(ret, sub.incoming(ui, source, opts))
859 888 return ret
860 889
861 890 def display(other, chlist, displayer):
862 891 limit = cmdutil.loglimit(opts)
863 892 if opts.get('newest_first'):
864 893 chlist.reverse()
865 894 count = 0
866 895 for n in chlist:
867 896 if limit is not None and count >= limit:
868 897 break
869 898 parents = [p for p in other.changelog.parents(n) if p != nullid]
870 899 if opts.get('no_merges') and len(parents) == 2:
871 900 continue
872 901 count += 1
873 902 displayer.show(other[n])
874 903 return _incoming(display, subreporecurse, ui, repo, source, opts)
875 904
876 905 def _outgoing(ui, repo, dest, opts):
877 906 dest = ui.expandpath(dest or 'default-push', dest or 'default')
878 907 dest, branches = parseurl(dest, opts.get('branch'))
879 908 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
880 909 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
881 910 if revs:
882 911 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
883 912
884 913 other = peer(repo, opts, dest)
885 914 outgoing = discovery.findcommonoutgoing(repo, other, revs,
886 915 force=opts.get('force'))
887 916 o = outgoing.missing
888 917 if not o:
889 918 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
890 919 return o, other
891 920
892 921 def outgoing(ui, repo, dest, opts):
893 922 def recurse():
894 923 ret = 1
895 924 if opts.get('subrepos'):
896 925 ctx = repo[None]
897 926 for subpath in sorted(ctx.substate):
898 927 sub = ctx.sub(subpath)
899 928 ret = min(ret, sub.outgoing(ui, dest, opts))
900 929 return ret
901 930
902 931 limit = cmdutil.loglimit(opts)
903 932 o, other = _outgoing(ui, repo, dest, opts)
904 933 if not o:
905 934 cmdutil.outgoinghooks(ui, repo, other, opts, o)
906 935 return recurse()
907 936
908 937 if opts.get('newest_first'):
909 938 o.reverse()
910 939 ui.pager('outgoing')
911 940 displayer = cmdutil.show_changeset(ui, repo, opts)
912 941 count = 0
913 942 for n in o:
914 943 if limit is not None and count >= limit:
915 944 break
916 945 parents = [p for p in repo.changelog.parents(n) if p != nullid]
917 946 if opts.get('no_merges') and len(parents) == 2:
918 947 continue
919 948 count += 1
920 949 displayer.show(repo[n])
921 950 displayer.close()
922 951 cmdutil.outgoinghooks(ui, repo, other, opts, o)
923 952 recurse()
924 953 return 0 # exit code is zero since we found outgoing changes
925 954
926 955 def verify(repo):
927 956 """verify the consistency of a repository"""
928 957 ret = verifymod.verify(repo)
929 958
930 959 # Broken subrepo references in hidden csets don't seem worth worrying about,
931 960 # since they can't be pushed/pulled, and --hidden can be used if they are a
932 961 # concern.
933 962
934 963 # pathto() is needed for -R case
935 964 revs = repo.revs("filelog(%s)",
936 965 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
937 966
938 967 if revs:
939 968 repo.ui.status(_('checking subrepo links\n'))
940 969 for rev in revs:
941 970 ctx = repo[rev]
942 971 try:
943 972 for subpath in ctx.substate:
944 973 try:
945 974 ret = (ctx.sub(subpath, allowcreate=False).verify()
946 975 or ret)
947 976 except error.RepoError as e:
948 977 repo.ui.warn(('%s: %s\n') % (rev, e))
949 978 except Exception:
950 979 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
951 980 node.short(ctx.node()))
952 981
953 982 return ret
954 983
955 984 def remoteui(src, opts):
956 985 'build a remote ui from ui or repo and opts'
957 986 if util.safehasattr(src, 'baseui'): # looks like a repository
958 987 dst = src.baseui.copy() # drop repo-specific config
959 988 src = src.ui # copy target options from repo
960 989 else: # assume it's a global ui object
961 990 dst = src.copy() # keep all global options
962 991
963 992 # copy ssh-specific options
964 993 for o in 'ssh', 'remotecmd':
965 994 v = opts.get(o) or src.config('ui', o)
966 995 if v:
967 996 dst.setconfig("ui", o, v, 'copied')
968 997
969 998 # copy bundle-specific options
970 999 r = src.config('bundle', 'mainreporoot')
971 1000 if r:
972 1001 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
973 1002
974 1003 # copy selected local settings to the remote ui
975 1004 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
976 1005 for key, val in src.configitems(sect):
977 1006 dst.setconfig(sect, key, val, 'copied')
978 1007 v = src.config('web', 'cacerts')
979 1008 if v:
980 1009 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
981 1010
982 1011 return dst
983 1012
984 1013 # Files of interest
985 1014 # Used to check if the repository has changed looking at mtime and size of
986 1015 # these files.
987 1016 foi = [('spath', '00changelog.i'),
988 1017 ('spath', 'phaseroots'), # ! phase can change content at the same size
989 1018 ('spath', 'obsstore'),
990 1019 ('path', 'bookmarks'), # ! bookmark can change content at the same size
991 1020 ]
992 1021
993 1022 class cachedlocalrepo(object):
994 1023 """Holds a localrepository that can be cached and reused."""
995 1024
996 1025 def __init__(self, repo):
997 1026 """Create a new cached repo from an existing repo.
998 1027
999 1028 We assume the passed in repo was recently created. If the
1000 1029 repo has changed between when it was created and when it was
1001 1030 turned into a cache, it may not refresh properly.
1002 1031 """
1003 1032 assert isinstance(repo, localrepo.localrepository)
1004 1033 self._repo = repo
1005 1034 self._state, self.mtime = self._repostate()
1006 1035 self._filtername = repo.filtername
1007 1036
1008 1037 def fetch(self):
1009 1038 """Refresh (if necessary) and return a repository.
1010 1039
1011 1040 If the cached instance is out of date, it will be recreated
1012 1041 automatically and returned.
1013 1042
1014 1043 Returns a tuple of the repo and a boolean indicating whether a new
1015 1044 repo instance was created.
1016 1045 """
1017 1046 # We compare the mtimes and sizes of some well-known files to
1018 1047 # determine if the repo changed. This is not precise, as mtimes
1019 1048 # are susceptible to clock skew and imprecise filesystems and
1020 1049 # file content can change while maintaining the same size.
1021 1050
1022 1051 state, mtime = self._repostate()
1023 1052 if state == self._state:
1024 1053 return self._repo, False
1025 1054
1026 1055 repo = repository(self._repo.baseui, self._repo.url())
1027 1056 if self._filtername:
1028 1057 self._repo = repo.filtered(self._filtername)
1029 1058 else:
1030 1059 self._repo = repo.unfiltered()
1031 1060 self._state = state
1032 1061 self.mtime = mtime
1033 1062
1034 1063 return self._repo, True
1035 1064
1036 1065 def _repostate(self):
1037 1066 state = []
1038 1067 maxmtime = -1
1039 1068 for attr, fname in foi:
1040 1069 prefix = getattr(self._repo, attr)
1041 1070 p = os.path.join(prefix, fname)
1042 1071 try:
1043 1072 st = os.stat(p)
1044 1073 except OSError:
1045 1074 st = os.stat(prefix)
1046 1075 state.append((st.st_mtime, st.st_size))
1047 1076 maxmtime = max(maxmtime, st.st_mtime)
1048 1077
1049 1078 return tuple(state), maxmtime
1050 1079
1051 1080 def copy(self):
1052 1081 """Obtain a copy of this class instance.
1053 1082
1054 1083 A new localrepository instance is obtained. The new instance should be
1055 1084 completely independent of the original.
1056 1085 """
1057 1086 repo = repository(self._repo.baseui, self._repo.origroot)
1058 1087 if self._filtername:
1059 1088 repo = repo.filtered(self._filtername)
1060 1089 else:
1061 1090 repo = repo.unfiltered()
1062 1091 c = cachedlocalrepo(repo)
1063 1092 c._state = self._state
1064 1093 c.mtime = self.mtime
1065 1094 return c
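
After this change, both the existing hg.share() and the new hg.unshare() live in mercurial/hg.py, so a share can be created and later dissolved through the same module. A small round-trip sketch against that API (repository paths are illustrative):

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()
    # create a lightweight working directory backed by an existing local repo
    shared = hg.share(u, '/srv/repos/main', dest='/srv/repos/feature-wc',
                      update=True, bookmarks=True)
    # ... later, detach it so it owns a full private store again
    hg.unshare(u, shared)
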