##// END OF EJS Templates
hg: have `updatetotally` more thoroughly check the updatecheck argument (API)
Author: Augie Fackler
Changeset: r43241:ee1ef76d (default branch)
parent child Browse files
Show More
@@ -1,1245 +1,1251 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15 import stat
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 nullid,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 bundlerepo,
25 25 cacheutil,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 httppeer,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 logexchange,
37 37 merge as mergemod,
38 38 narrowspec,
39 39 node,
40 40 phases,
41 41 pycompat,
42 42 scmutil,
43 43 sshpeer,
44 44 statichttprepo,
45 45 ui as uimod,
46 46 unionrepo,
47 47 url,
48 48 util,
49 49 verify as verifymod,
50 50 vfs as vfsmod,
51 51 )
52 52
53 53 from .interfaces import (
54 54 repository as repositorymod,
55 55 )
56 56
# convenience alias for releasing one or more locks
release = lock.release

# shared features
sharedbookmarks = 'bookmarks'
def _local(path):
    """Return the module handling *path*: bundlerepo for a plain file,
    localrepo otherwise."""
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(_('invalid path %s: %s') % (
            path, pycompat.bytestr(e)))

    return bundlerepo if isfile else localrepo
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a peer into concrete revisions.

    :lrepo: local repository (may be None; only used to resolve '.')
    :other: repo or peer the branches are looked up on
    :branches: a (hashbranch, branches) pair as produced by parseurl()
    :revs: optionally, explicitly requested revisions

    Returns a (revs, checkoutrev) pair; checkoutrev is the first entry of
    revs, or None when nothing was requested.
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested: pass the revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        # old server without branchmap support: treat the fragment as a
        # plain revision and let the remote side resolve it
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        # resolve one branch name; extends `revs` (newest head first) and
        # reports whether the branch exists on the remote
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # the URL fragment may be either a branch name or a revision
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    if u.fragment:
        # strip the #branch fragment off the URL and report it separately
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return bytes(u), (branch, branches or [])
# maps a URL scheme to the module (or factory) providing access to
# repositories reachable through that scheme
schemes = {
    'bundle': bundlerepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
    'union': unionrepo,
}
136 136
def _peerlookup(path):
    """Return the repo/peer provider responsible for *path*'s scheme."""
    u = util.url(path)
    handler = schemes.get(u.scheme or 'file') or schemes['file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because it can be an unloaded
        # module that implements __call__
        if not util.safehasattr(handler, 'instance'):
            raise
        return handler
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    # a path: ask the scheme handler, defaulting to "not local"
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), 'rb')
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path

    Runs presetup callbacks, every extension's reposetup() hook, and —
    for non-local objects — the registered wire peer setup functions.
    """
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    # the instance may carry its own (repo-level) ui; prefer it
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(b'extension', b' > reposetup for %s took %s\n',
                       name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own initialization pass
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    obj = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                      intents=intents, createopts=createopts)
    repo = obj.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or obj.url()))
    # callers always get the "visible" view (hidden changesets filtered)
    return repo.filtered('visible')
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(rui, path, create, intents=intents,
                      createopts=createopts)
    return obj.peer()
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    # normpath drops any trailing slash so basename yields the last segment
    return os.path.basename(os.path.normpath(path)) if path else ''
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not shared at all
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        # already resolved earlier; reuse the cached object
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourceroot = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(sourceroot)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    :source: path (bytes) or repo object to share from; must be local
    :dest: destination path (defaults to the basename of source)
    :update: whether/where to update the new working directory
    :bookmarks: whether bookmarks are part of the shared data
    :relative: store the share pointer as a relative path
    Returns the newly created shared repository object.
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the repo object reflects the post-share configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():

            # keep the old pointer around for debugging rather than deleting
            sharefile = repo.vfs.join('sharedpath')
            util.rename(sharefile, sharefile + '.old')

            repo.requirements.discard('shared')
            repo.requirements.discard('relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        # seed the new repo's hgrc with a default pull/push path
        hgrc = ('[paths]\n'
                'default = %s\n') % default
        destrepo.vfs.write('hgrc', util.tonativeeol(hgrc))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested revision first, then fall back to default/tip
    for candidate in (checkout, 'default', 'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        # hardlink stays None until util.copyfiles reports what it did
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic, unit=_('files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo's phaseroots carry no information
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

        # Resolve the value to put in [paths] section for the source.
        if islocal(source):
            defaultpath = os.path.abspath(util.urllocalpath(source))
        else:
            defaultpath = source

        sharerepo = repository(ui, path=sharepath)
        destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
                         defaultpath=defaultpath)

        # We need to perform a pull against the dest repo to fetch bookmarks
        # and other non-store data that isn't shared by default. In the case of
        # non-existing shared repo, this means we pull from the remote twice. This
        # is a bit weird. But at the time it was implemented, there wasn't an easy
        # way to pull just non-changegroup data.
        exchange.pull(destrepo, srcpeer, heads=revs)

        _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srccache = srcrepo.vfs.join('cache/%s' % fname)
    if not os.path.exists(srccache):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srccache, os.path.join(dstcachedir, fname))
def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None,
          storeincludepats=None, storeexcludepats=None, depth=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {'path:.'}

        createopts['narrowfiles'] = True

    if depth:
        createopts['shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled.  This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature.  The lfs creation option is set in either case so that a
        # requirement is added.  If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts['lfs'] = True

        if extensions.disabledext('lfs'):
            ui.status(_('(remote is using large file support (lfs), but it is '
                        'explicitly disabled in the local configuration)\n'))
        else:
            ui.status(_('(remote is using large file support (lfs); lfs will '
                        'be enabled for this repository)\n'))

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_('(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats,
                                  depth=depth)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                 )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
def _showstats(repo, stats, quietempty=False):
    """Print an update-stats summary line, optionally staying silent when
    nothing happened."""
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node,
                           branchmerge=False,
                           force=overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when unresolved files remain after the update."""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0
# naming conflict in clone()
_update = update
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # a forced update invalidates any in-progress graft
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0
# naming conflict in updatetotally()
_clean = clean
# every updatecheck strategy updatetotally() accepts
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # A caller that passes updatecheck explicitly must use one of the
    # recognized UPDATECHECK_* values; anything else is a programming
    # error, hence ValueError rather than Abort (API).
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(r'Invalid updatecheck value %r (can accept %r)' % (
            updatecheck, _VALID_UPDATECHECKS))
    # hold the wlock for the whole operation so the working copy and the
    # bookmark state are changed atomically with respect to other writers
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # an active bookmark to move)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out *before* updating; after that, no further
                # dirty-state checking is needed
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the requested name is a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # updating to something that is not a bookmark deactivates any
            # currently active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
962 968
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if abort:
        return abortmerge(repo.ui, repo)

    mergestats = mergemod.update(repo, node, branchmerge=True, force=force,
                                 mergeforce=mergeforce, labels=labels)
    _showstats(repo, mergestats)
    hasconflicts = mergestats.unresolvedcount > 0
    if hasconflicts:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return hasconflicts
979 985
def abortmerge(ui, repo):
    """Abandon an in-progress merge and update back to the local side.

    Returns true if unresolved conflicts remain after the cleanup update.
    """
    ms = mergemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts; go back to the local side of the merge
        node = ms.localctx.hex()
    else:
        # there were no conflicts, so no mergestate was stored; use '.'
        node = repo['.'].hex()

    repo.ui.status(_("aborting the merge, updating back to"
                     " %s\n") % node[:12])
    stats = mergemod.update(repo, node, branchmerge=False, force=True)
    _showstats(repo, stats)
    return stats.unresolvedcount > 0
994 1000
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    whatever subreporecurse() returns.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # pull the incoming changesets into a (possibly bundle-backed) repo so
    # they can be displayed; cleanupfn disposes of any temporary bundle
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always remove temporary bundle files, even if display failed
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
1025 1031
def incoming(ui, repo, source, opts):
    """Show changesets in ``source`` not present locally; return exit code."""
    def subreporecurse():
        # recurse into subrepos when requested; the combined exit code is 0
        # as soon as any subrepo reports incoming changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        shown = 0
        for rev in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(rev) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[rev])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1050 1056
def _outgoing(ui, repo, dest, opts):
    """Compute changesets missing from the push destination.

    Returns ``(missing, other)`` where ``missing`` is the (possibly empty)
    list of outgoing changeset nodes and ``other`` is the remote peer.
    Raises Abort when no destination is configured.
    """
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                         hint=_("see 'hg help config.paths'"))
    # a path may define a distinct push location
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other
1071 1077
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in ``dest``; return exit code."""
    def recurse():
        # recurse into subrepos when requested; the combined exit code is 0
        # as soon as any subrepo reports outgoing changes
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    shown = 0
    for rev in o:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(rev) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[rev])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
1105 1111
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies any subrepositories referenced from .hgsubstate in
    visible revisions.  Returns the combined (truthy on failure) result.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # allowcreate=False: verification must not create
                        # a missing subrepo on disk
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        # a single broken subrepo should not stop checking
                        # the remaining ones
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                # substate parsing itself blew up; report and continue
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
1134 1140
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'):
        # src looks like a repository: start from its base ui so we drop
        # repo-specific config, but keep reading options from the repo ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume src is a global ui object; keep all of its options
        dst = src.copy()

    # ssh-specific options may come from the command line or the source config
    for key in ('ssh', 'remotecmd'):
        value = opts.get(key) or src.config('ui', key)
        if value:
            dst.setconfig("ui", key, value, 'copied')

    # propagate the main repository root for bundle repositories
    mainroot = src.config('bundle', 'mainreporoot')
    if mainroot:
        dst.setconfig('bundle', 'mainreporoot', mainroot, 'copied')

    # copy selected local settings to the remote ui
    for section in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for name, value in src.configitems(section):
            dst.setconfig(section, name, value, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
1163 1169
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (name of a repo attribute holding a directory path,
# file name relative to that directory) -- see cachedlocalrepo._repostate().
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]
1172 1178
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        currentstate, currentmtime = self._repostate()
        if currentstate == self._state:
            # nothing of interest changed on disk; reuse the cached repo
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = currentstate
        self.mtime = currentmtime

        return self._repo, True

    def _repostate(self):
        # Build a fingerprint of the repo from the files of interest; the
        # state tuple compares equal iff none of them appears changed.
        entries = []
        maxmtime = -1
        for attrname, fname in foi:
            prefix = getattr(self._repo, attrname)
            target = os.path.join(prefix, fname)
            try:
                st = os.stat(target)
            except OSError:
                # fall back to the containing directory when the file is
                # missing (e.g. no bookmarks yet)
                st = os.stat(prefix)
            mtime = st[stat.ST_MTIME]
            entries.append((mtime, st.st_size))
            maxmtime = max(maxmtime, mtime)

        return tuple(entries), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        newrepo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            newrepo = newrepo.filtered(self._filtername)
        else:
            newrepo = newrepo.unfiltered()
        clone = cachedlocalrepo(newrepo)
        clone._state = self._state
        clone.mtime = self.mtime
        return clone
General Comments 0
You need to be logged in to leave comments. Login now