manifest: move manifest creation to a helper function...
Durham Goode
r30218:1767723f stable
diff --git a/mercurial/bundlerepo.py b/mercurial/bundlerepo.py
@@ -1,554 +1,553 @@
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import os
17 17 import shutil
18 18 import tempfile
19 19
20 20 from .i18n import _
21 21 from .node import nullid
22 22
23 23 from . import (
24 24 bundle2,
25 25 changegroup,
26 26 changelog,
27 27 cmdutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 filelog,
32 32 localrepo,
33 33 manifest,
34 34 mdiff,
35 35 node as nodemod,
36 36 pathutil,
37 37 phases,
38 38 revlog,
39 39 scmutil,
40 40 util,
41 41 )
42 42
43 43 class bundlerevlog(revlog.revlog):
44 44 def __init__(self, opener, indexfile, bundle, linkmapper):
45 45 # How it works:
46 46 # To retrieve a revision, we need to know the offset of the revision in
47 47 # the bundle (an unbundle object). We store this offset in the index
48 48 # (start). The base of the delta is stored in the base field.
49 49 #
50 50 # To differentiate a rev in the bundle from a rev in the revlog, we
51 51 # check revision against repotiprev.
52 52 opener = scmutil.readonlyvfs(opener)
53 53 revlog.revlog.__init__(self, opener, indexfile)
54 54 self.bundle = bundle
55 55 n = len(self)
56 56 self.repotiprev = n - 1
57 57 chain = None
58 58 self.bundlerevs = set() # used by 'bundle()' revset expression
59 59 getchunk = lambda: bundle.deltachunk(chain)
60 60 for chunkdata in iter(getchunk, {}):
61 61 node = chunkdata['node']
62 62 p1 = chunkdata['p1']
63 63 p2 = chunkdata['p2']
64 64 cs = chunkdata['cs']
65 65 deltabase = chunkdata['deltabase']
66 66 delta = chunkdata['delta']
67 67
68 68 size = len(delta)
69 69 start = bundle.tell() - size
70 70
71 71 link = linkmapper(cs)
72 72 if node in self.nodemap:
73 73 # this can happen if two branches make the same change
74 74 chain = node
75 75 self.bundlerevs.add(self.nodemap[node])
76 76 continue
77 77
78 78 for p in (p1, p2):
79 79 if p not in self.nodemap:
80 80 raise error.LookupError(p, self.indexfile,
81 81 _("unknown parent"))
82 82
83 83 if deltabase not in self.nodemap:
84 84 raise error.LookupError(deltabase, self.indexfile,
85 85 _('unknown delta base'))
86 86
87 87 baserev = self.rev(deltabase)
88 88 # start, size, full unc. size, base (unused), link, p1, p2, node
89 89 e = (revlog.offset_type(start, 0), size, -1, baserev, link,
90 90 self.rev(p1), self.rev(p2), node)
91 91 self.index.insert(-1, e)
92 92 self.nodemap[node] = n
93 93 self.bundlerevs.add(n)
94 94 chain = node
95 95 n += 1
96 96
97 97 def _chunk(self, rev):
98 98 # Warning: in the case of a bundle, the diff is against what we stored
99 99 # as the delta base, not against rev - 1
100 100 # XXX: could use some caching
101 101 if rev <= self.repotiprev:
102 102 return revlog.revlog._chunk(self, rev)
103 103 self.bundle.seek(self.start(rev))
104 104 return self.bundle.read(self.length(rev))
105 105
106 106 def revdiff(self, rev1, rev2):
107 107 """return or calculate a delta between two revisions"""
108 108 if rev1 > self.repotiprev and rev2 > self.repotiprev:
109 109 # hot path for bundle
110 110 revb = self.index[rev2][3]
111 111 if revb == rev1:
112 112 return self._chunk(rev2)
113 113 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
114 114 return revlog.revlog.revdiff(self, rev1, rev2)
115 115
116 116 return mdiff.textdiff(self.revision(self.node(rev1)),
117 117 self.revision(self.node(rev2)))
118 118
119 119 def revision(self, nodeorrev):
120 120 """return an uncompressed revision of a given node or revision
121 121 number.
122 122 """
123 123 if isinstance(nodeorrev, int):
124 124 rev = nodeorrev
125 125 node = self.node(rev)
126 126 else:
127 127 node = nodeorrev
128 128 rev = self.rev(node)
129 129
130 130 if node == nullid:
131 131 return ""
132 132
133 133 text = None
134 134 chain = []
135 135 iterrev = rev
136 136 # reconstruct the revision if it is from a changegroup
137 137 while iterrev > self.repotiprev:
138 138 if self._cache and self._cache[1] == iterrev:
139 139 text = self._cache[2]
140 140 break
141 141 chain.append(iterrev)
142 142 iterrev = self.index[iterrev][3]
143 143 if text is None:
144 144 text = self.baserevision(iterrev)
145 145
146 146 while chain:
147 147 delta = self._chunk(chain.pop())
148 148 text = mdiff.patches(text, [delta])
149 149
150 150 self._checkhash(text, node, rev)
151 151 self._cache = (node, rev, text)
152 152 return text
153 153
154 154 def baserevision(self, nodeorrev):
155 155 # Revlog subclasses may override the 'revision' method to modify the
156 156 # format of content retrieved from the revlog. To use bundlerevlog with
157 157 # such a class, override 'baserevision' to make a more specific call here.
158 158 return revlog.revlog.revision(self, nodeorrev)
159 159
160 160 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
161 161 raise NotImplementedError
162 162 def addgroup(self, revs, linkmapper, transaction):
163 163 raise NotImplementedError
164 164 def strip(self, rev, minlink):
165 165 raise NotImplementedError
166 166 def checksize(self):
167 167 raise NotImplementedError
168 168
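bundlerevlog.revision() above reconstructs a bundled revision by walking the deltabase links stored in the index (index[rev][3]) back to a revision whose full text is already available, then re-applying the collected deltas in forward order. A minimal standalone sketch of that walk, with toy dictionaries standing in for the real index, cache, and mdiff calls:

def resolvetext(rev, fulltexts, deltabase, deltas, applydelta):
    # fulltexts: revs whose full text is known (repo revs or the cache)
    # deltabase: rev -> base rev (self.index[rev][3] in the real code)
    chain = []
    while rev not in fulltexts:
        chain.append(rev)
        rev = deltabase[rev]
    text = fulltexts[rev]
    while chain:
        # mdiff.patches(text, [delta]) in the real code
        text = applydelta(text, deltas[chain.pop()])
    return text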
169 169 class bundlechangelog(bundlerevlog, changelog.changelog):
170 170 def __init__(self, opener, bundle):
171 171 changelog.changelog.__init__(self, opener)
172 172 linkmapper = lambda x: x
173 173 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
174 174 linkmapper)
175 175
176 176 def baserevision(self, nodeorrev):
177 177 # Although the changelog doesn't override the 'revision' method, some
178 178 # extensions may replace this class with another that does. The same
179 179 # applies to the manifest and filelog classes.
180 180
181 181 # This bypasses filtering on changelog.node() and rev() because we need
182 182 # revision text of the bundle base even if it is hidden.
183 183 oldfilter = self.filteredrevs
184 184 try:
185 185 self.filteredrevs = ()
186 186 return changelog.changelog.revision(self, nodeorrev)
187 187 finally:
188 188 self.filteredrevs = oldfilter
189 189
190 190 class bundlemanifest(bundlerevlog, manifest.manifest):
191 191 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
192 192 manifest.manifest.__init__(self, opener, dir=dir)
193 193 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
194 194 linkmapper)
195 195 if dirlogstarts is None:
196 196 dirlogstarts = {}
197 197 if self.bundle.version == "03":
198 198 dirlogstarts = _getfilestarts(self.bundle)
199 199 self._dirlogstarts = dirlogstarts
200 200 self._linkmapper = linkmapper
201 201
202 202 def baserevision(self, nodeorrev):
203 203 node = nodeorrev
204 204 if isinstance(node, int):
205 205 node = self.node(node)
206 206
207 207 if node in self.fulltextcache:
208 208 result = self.fulltextcache[node].tostring()
209 209 else:
210 210 result = manifest.manifest.revision(self, nodeorrev)
211 211 return result
212 212
213 213 def dirlog(self, d):
214 214 if d in self._dirlogstarts:
215 215 self.bundle.seek(self._dirlogstarts[d])
216 216 return bundlemanifest(
217 217 self.opener, self.bundle, self._linkmapper,
218 218 self._dirlogstarts, dir=d)
219 219 return super(bundlemanifest, self).dirlog(d)
220 220
221 221 class bundlefilelog(bundlerevlog, filelog.filelog):
222 222 def __init__(self, opener, path, bundle, linkmapper):
223 223 filelog.filelog.__init__(self, opener, path)
224 224 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
225 225 linkmapper)
226 226
227 227 def baserevision(self, nodeorrev):
228 228 return filelog.filelog.revision(self, nodeorrev)
229 229
230 230 class bundlepeer(localrepo.localpeer):
231 231 def canpush(self):
232 232 return False
233 233
234 234 class bundlephasecache(phases.phasecache):
235 235 def __init__(self, *args, **kwargs):
236 236 super(bundlephasecache, self).__init__(*args, **kwargs)
237 237 if util.safehasattr(self, 'opener'):
238 238 self.opener = scmutil.readonlyvfs(self.opener)
239 239
240 240 def write(self):
241 241 raise NotImplementedError
242 242
243 243 def _write(self, fp):
244 244 raise NotImplementedError
245 245
246 246 def _updateroots(self, phase, newroots, tr):
247 247 self.phaseroots[phase] = newroots
248 248 self.invalidate()
249 249 self.dirty = True
250 250
251 251 def _getfilestarts(bundle):
252 252 bundlefilespos = {}
253 253 for chunkdata in iter(bundle.filelogheader, {}):
254 254 fname = chunkdata['filename']
255 255 bundlefilespos[fname] = bundle.tell()
256 256 for chunk in iter(lambda: bundle.deltachunk(None), {}):
257 257 pass
258 258 return bundlefilespos
259 259
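_getfilestarts() above builds its filename -> offset map by recording bundle.tell() at each filelog header and draining the delta chunks. Both loops use Python's two-argument iter(callable, sentinel), which keeps calling the callable until it returns the sentinel (an empty dict marks end-of-stream here). A self-contained illustration of the idiom:

import io

stream = io.BytesIO(b'abcdef')
for block in iter(lambda: stream.read(2), b''):
    print(block)  # b'ab', then b'cd', then b'ef'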
260 260 class bundlerepository(localrepo.localrepository):
261 261 def __init__(self, ui, path, bundlename):
262 262 def _writetempbundle(read, suffix, header=''):
263 263 """Write a temporary file to disk
264 264
265 265 This is a closure because we need to make sure it is tracked by
266 266 self.tempfile for cleanup purposes."""
267 267 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
268 268 suffix=".hg10un")
269 269 self.tempfile = temp
270 270
271 271 with os.fdopen(fdtemp, 'wb') as fptemp:
272 272 fptemp.write(header)
273 273 while True:
274 274 chunk = read(2**18)
275 275 if not chunk:
276 276 break
277 277 fptemp.write(chunk)
278 278
279 279 return self.vfs.open(self.tempfile, mode="rb")
280 280 self._tempparent = None
281 281 try:
282 282 localrepo.localrepository.__init__(self, ui, path)
283 283 except error.RepoError:
284 284 self._tempparent = tempfile.mkdtemp()
285 285 localrepo.instance(ui, self._tempparent, 1)
286 286 localrepo.localrepository.__init__(self, ui, self._tempparent)
287 287 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
288 288
289 289 if path:
290 290 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
291 291 else:
292 292 self._url = 'bundle:' + bundlename
293 293
294 294 self.tempfile = None
295 295 f = util.posixfile(bundlename, "rb")
296 296 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
297 297
298 298 if isinstance(self.bundle, bundle2.unbundle20):
299 299 cgstream = None
300 300 for part in self.bundle.iterparts():
301 301 if part.type == 'changegroup':
302 302 if cgstream is not None:
303 303 raise NotImplementedError("can't process "
304 304 "multiple changegroups")
305 305 cgstream = part
306 306 version = part.params.get('version', '01')
307 307 legalcgvers = changegroup.supportedincomingversions(self)
308 308 if version not in legalcgvers:
309 309 msg = _('Unsupported changegroup version: %s')
310 310 raise error.Abort(msg % version)
311 311 if self.bundle.compressed():
312 312 cgstream = _writetempbundle(part.read,
313 313 ".cg%sun" % version)
314 314
315 315 if cgstream is None:
316 316 raise error.Abort(_('No changegroups found'))
317 317 cgstream.seek(0)
318 318
319 319 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
320 320
321 321 elif self.bundle.compressed():
322 322 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
323 323 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
324 324 bundlename,
325 325 self.vfs)
326 326
327 327 # dict with the mapping 'filename' -> position in the bundle
328 328 self.bundlefilespos = {}
329 329
330 330 self.firstnewrev = self.changelog.repotiprev + 1
331 331 phases.retractboundary(self, None, phases.draft,
332 332 [ctx.node() for ctx in self[self.firstnewrev:]])
333 333
334 334 @localrepo.unfilteredpropertycache
335 335 def _phasecache(self):
336 336 return bundlephasecache(self, self._phasedefaults)
337 337
338 338 @localrepo.unfilteredpropertycache
339 339 def changelog(self):
340 340 # consume the header if it exists
341 341 self.bundle.changelogheader()
342 342 c = bundlechangelog(self.svfs, self.bundle)
343 343 self.manstart = self.bundle.tell()
344 344 return c
345 345
346 @localrepo.unfilteredpropertycache
347 def manifest(self):
346 def _constructmanifest(self):
348 347 self.bundle.seek(self.manstart)
349 348 # consume the header if it exists
350 349 self.bundle.manifestheader()
351 350 linkmapper = self.unfiltered().changelog.rev
352 351 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
353 352 self.filestart = self.bundle.tell()
354 353 return m
355 354
356 355 @localrepo.unfilteredpropertycache
357 356 def manstart(self):
358 357 self.changelog
359 358 return self.manstart
360 359
361 360 @localrepo.unfilteredpropertycache
362 361 def filestart(self):
363 362 self.manifest
364 363 return self.filestart
365 364
366 365 def url(self):
367 366 return self._url
368 367
369 368 def file(self, f):
370 369 if not self.bundlefilespos:
371 370 self.bundle.seek(self.filestart)
372 371 self.bundlefilespos = _getfilestarts(self.bundle)
373 372
374 373 if f in self.bundlefilespos:
375 374 self.bundle.seek(self.bundlefilespos[f])
376 375 linkmapper = self.unfiltered().changelog.rev
377 376 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
378 377 else:
379 378 return filelog.filelog(self.svfs, f)
380 379
381 380 def close(self):
382 381 """Close assigned bundle file immediately."""
383 382 self.bundlefile.close()
384 383 if self.tempfile is not None:
385 384 self.vfs.unlink(self.tempfile)
386 385 if self._tempparent:
387 386 shutil.rmtree(self._tempparent, True)
388 387
389 388 def cancopy(self):
390 389 return False
391 390
392 391 def peer(self):
393 392 return bundlepeer(self)
394 393
395 394 def getcwd(self):
396 395 return os.getcwd() # always outside the repo
397 396
398 397 # Check if parents exist in localrepo before setting
399 398 def setparents(self, p1, p2=nullid):
400 399 p1rev = self.changelog.rev(p1)
401 400 p2rev = self.changelog.rev(p2)
402 401 msg = _("setting parent to node %s that only exists in the bundle\n")
403 402 if self.changelog.repotiprev < p1rev:
404 403 self.ui.warn(msg % nodemod.hex(p1))
405 404 if self.changelog.repotiprev < p2rev:
406 405 self.ui.warn(msg % nodemod.hex(p2))
407 406 return super(bundlerepository, self).setparents(p1, p2)
408 407
409 408 def instance(ui, path, create):
410 409 if create:
411 410 raise error.Abort(_('cannot create new bundle repository'))
412 411 # internal config: bundle.mainreporoot
413 412 parentpath = ui.config("bundle", "mainreporoot", "")
414 413 if not parentpath:
415 414 # try to find the correct path to the working directory repo
416 415 parentpath = cmdutil.findrepo(os.getcwd())
417 416 if parentpath is None:
418 417 parentpath = ''
419 418 if parentpath:
420 419 # Try to make the full path relative so we get a nice, short URL.
421 420 # In particular, we don't want temp dir names in test outputs.
422 421 cwd = os.getcwd()
423 422 if parentpath == cwd:
424 423 parentpath = ''
425 424 else:
426 425 cwd = pathutil.normasprefix(cwd)
427 426 if parentpath.startswith(cwd):
428 427 parentpath = parentpath[len(cwd):]
429 428 u = util.url(path)
430 429 path = u.localpath()
431 430 if u.scheme == 'bundle':
432 431 s = path.split("+", 1)
433 432 if len(s) == 1:
434 433 repopath, bundlename = parentpath, s[0]
435 434 else:
436 435 repopath, bundlename = s
437 436 else:
438 437 repopath, bundlename = parentpath, path
439 438 return bundlerepository(ui, repopath, bundlename)
440 439
441 440 class bundletransactionmanager(object):
442 441 def transaction(self):
443 442 return None
444 443
445 444 def close(self):
446 445 raise NotImplementedError
447 446
448 447 def release(self):
449 448 raise NotImplementedError
450 449
451 450 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
452 451 force=False):
453 452 '''obtains a bundle of changes incoming from other
454 453
455 454 "onlyheads" restricts the returned changes to those reachable from the
456 455 specified heads.
457 456 "bundlename", if given, stores the bundle to this file path permanently;
458 457 otherwise it's stored to a temp file and gets deleted again when you call
459 458 the returned "cleanupfn".
460 459 "force" indicates whether to proceed on unrelated repos.
461 460
462 461 Returns a tuple (local, csets, cleanupfn):
463 462
464 463 "local" is a local repo from which to obtain the actual incoming
465 464 changesets; it is a bundlerepo for the obtained bundle when the
466 465 original "other" is remote.
467 466 "csets" lists the incoming changeset node ids.
468 467 "cleanupfn" must be called without arguments when you're done processing
469 468 the changes; it closes both the original "other" and the one returned
470 469 here.
471 470 '''
472 471 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
473 472 force=force)
474 473 common, incoming, rheads = tmp
475 474 if not incoming:
476 475 try:
477 476 if bundlename:
478 477 os.unlink(bundlename)
479 478 except OSError:
480 479 pass
481 480 return repo, [], other.close
482 481
483 482 commonset = set(common)
484 483 rheads = [x for x in rheads if x not in commonset]
485 484
486 485 bundle = None
487 486 bundlerepo = None
488 487 localrepo = other.local()
489 488 if bundlename or not localrepo:
490 489 # create a bundle (uncompressed if other repo is not local)
491 490
492 491 # developer config: devel.legacy.exchange
493 492 legexc = ui.configlist('devel', 'legacy.exchange')
494 493 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
495 494 canbundle2 = (not forcebundle1
496 495 and other.capable('getbundle')
497 496 and other.capable('bundle2'))
498 497 if canbundle2:
499 498 kwargs = {}
500 499 kwargs['common'] = common
501 500 kwargs['heads'] = rheads
502 501 kwargs['bundlecaps'] = exchange.caps20to10(repo)
503 502 kwargs['cg'] = True
504 503 b2 = other.getbundle('incoming', **kwargs)
505 504 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
506 505 bundlename)
507 506 else:
508 507 if other.capable('getbundle'):
509 508 cg = other.getbundle('incoming', common=common, heads=rheads)
510 509 elif onlyheads is None and not other.capable('changegroupsubset'):
511 510 # compat with older servers when pulling all remote heads
512 511 cg = other.changegroup(incoming, "incoming")
513 512 rheads = None
514 513 else:
515 514 cg = other.changegroupsubset(incoming, rheads, 'incoming')
516 515 if localrepo:
517 516 bundletype = "HG10BZ"
518 517 else:
519 518 bundletype = "HG10UN"
520 519 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
521 520 bundletype)
522 521 # keep written bundle?
523 522 if bundlename:
524 523 bundle = None
525 524 if not localrepo:
526 525 # use the created uncompressed bundlerepo
527 526 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
528 527 fname)
529 528 # this repo contains local and other now, so filter out local again
530 529 common = repo.heads()
531 530 if localrepo:
532 531 # Part of common may be remotely filtered,
533 532 # so use an unfiltered version.
534 533 # The discovery process probably needs cleanup to avoid that.
535 534 localrepo = localrepo.unfiltered()
536 535
537 536 csets = localrepo.changelog.findmissing(common, rheads)
538 537
539 538 if bundlerepo:
540 539 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
541 540 remotephases = other.listkeys('phases')
542 541
543 542 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
544 543 pullop.trmanager = bundletransactionmanager()
545 544 exchange._pullapplyphases(pullop, remotephases)
546 545
547 546 def cleanup():
548 547 if bundlerepo:
549 548 bundlerepo.close()
550 549 if bundle:
551 550 os.unlink(bundle)
552 551 other.close()
553 552
554 553 return (localrepo, csets, cleanup)
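Per the docstring of getremotechanges() above, callers get back (local, csets, cleanupfn) and must call the cleanup function when done. A hedged usage sketch; the peer URL and the direct ui/repo construction are illustrative assumptions, not part of this change:

from mercurial import bundlerepo, hg, ui as uimod

ui = uimod.ui()
repo = hg.repository(ui, '.')
other = hg.peer(ui, {}, 'http://example.com/repo')
local, csets, cleanup = bundlerepo.getremotechanges(ui, repo, other)
try:
    for node in csets:  # incoming changeset node ids
        ui.write('%s\n' % local[node].description())
finally:
    cleanup()  # closes "other" and any temporary bundle repo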
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
@@ -1,2000 +1,2006 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63
64 64 class repofilecache(scmutil.filecache):
65 65 """All filecache usage on repo is done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 69 if repo is None:
70 70 return self
71 71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 72 def __set__(self, repo, value):
73 73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 74 def __delete__(self, repo):
75 75 return super(repofilecache, self).__delete__(repo.unfiltered())
76 76
77 77 class storecache(repofilecache):
78 78 """filecache for files in the store"""
79 79 def join(self, obj, fname):
80 80 return obj.sjoin(fname)
81 81
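repofilecache (and storecache, which only changes the join path to the store) is a data descriptor: reads, writes, and deletes are all redirected to the unfiltered repo, so every filtered view shares one cached value. A toy model of that redirection, independent of the scmutil.filecache machinery (names here are illustrative, not Mercurial API):

class unfilteredattr(object):
    def __init__(self, name):
        self.name = name  # stored under '_' + name to avoid recursion
    def __get__(self, repo, type=None):
        if repo is None:
            return self  # accessed on the class, not an instance
        return getattr(repo.unfiltered(), '_' + self.name)
    def __set__(self, repo, value):
        setattr(repo.unfiltered(), '_' + self.name, value)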
82 82 class unfilteredpropertycache(util.propertycache):
83 83 """propertycache that applies to the unfiltered repo only"""
84 84
85 85 def __get__(self, repo, type=None):
86 86 unfi = repo.unfiltered()
87 87 if unfi is repo:
88 88 return super(unfilteredpropertycache, self).__get__(unfi)
89 89 return getattr(unfi, self.name)
90 90
91 91 class filteredpropertycache(util.propertycache):
92 92 """propertycache that must take filtering into account"""
93 93
94 94 def cachevalue(self, obj, value):
95 95 object.__setattr__(obj, self.name, value)
96 96
97 97
98 98 def hasunfilteredcache(repo, name):
99 99 """check if a repo has an unfilteredpropertycache value for <name>"""
100 100 return name in vars(repo.unfiltered())
101 101
102 102 def unfilteredmethod(orig):
103 103 """decorate a method that always needs to be run on the unfiltered version"""
104 104 def wrapper(repo, *args, **kwargs):
105 105 return orig(repo.unfiltered(), *args, **kwargs)
106 106 return wrapper
107 107
108 108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 109 'unbundle'))
110 110 legacycaps = moderncaps.union(set(['changegroupsubset']))
111 111
112 112 class localpeer(peer.peerrepository):
113 113 '''peer for a local repo; reflects only the most recent API'''
114 114
115 115 def __init__(self, repo, caps=moderncaps):
116 116 peer.peerrepository.__init__(self)
117 117 self._repo = repo.filtered('served')
118 118 self.ui = repo.ui
119 119 self._caps = repo._restrictcapabilities(caps)
120 120 self.requirements = repo.requirements
121 121 self.supportedformats = repo.supportedformats
122 122
123 123 def close(self):
124 124 self._repo.close()
125 125
126 126 def _capabilities(self):
127 127 return self._caps
128 128
129 129 def local(self):
130 130 return self._repo
131 131
132 132 def canpush(self):
133 133 return True
134 134
135 135 def url(self):
136 136 return self._repo.url()
137 137
138 138 def lookup(self, key):
139 139 return self._repo.lookup(key)
140 140
141 141 def branchmap(self):
142 142 return self._repo.branchmap()
143 143
144 144 def heads(self):
145 145 return self._repo.heads()
146 146
147 147 def known(self, nodes):
148 148 return self._repo.known(nodes)
149 149
150 150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 151 **kwargs):
152 152 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
153 153 common=common, bundlecaps=bundlecaps,
154 154 **kwargs)
155 155 cb = util.chunkbuffer(chunks)
156 156
157 157 if bundlecaps is not None and 'HG20' in bundlecaps:
158 158 # When requesting a bundle2, getbundle returns a stream to make the
159 159 # wire level function happier. We need to build a proper object
160 160 # from it in local peer.
161 161 return bundle2.getunbundler(self.ui, cb)
162 162 else:
163 163 return changegroup.getunbundler('01', cb, None)
164 164
165 165 # TODO We might want to move the next two calls into legacypeer and add
166 166 # unbundle instead.
167 167
168 168 def unbundle(self, cg, heads, url):
169 169 """apply a bundle on a repo
170 170
171 171 This function handles the repo locking itself."""
172 172 try:
173 173 try:
174 174 cg = exchange.readbundle(self.ui, cg, None)
175 175 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
176 176 if util.safehasattr(ret, 'getchunks'):
177 177 # This is a bundle20 object, turn it into an unbundler.
178 178 # This little dance should be dropped eventually when the
179 179 # API is finally improved.
180 180 stream = util.chunkbuffer(ret.getchunks())
181 181 ret = bundle2.getunbundler(self.ui, stream)
182 182 return ret
183 183 except Exception as exc:
184 184 # If the exception contains output salvaged from a bundle2
185 185 # reply, we need to make sure it is printed before continuing
186 186 # to fail. So we build a bundle2 with such output and consume
187 187 # it directly.
188 188 #
189 189 # This is not very elegant but allows a "simple" solution for
190 190 # issue4594
191 191 output = getattr(exc, '_bundle2salvagedoutput', ())
192 192 if output:
193 193 bundler = bundle2.bundle20(self._repo.ui)
194 194 for out in output:
195 195 bundler.addpart(out)
196 196 stream = util.chunkbuffer(bundler.getchunks())
197 197 b = bundle2.getunbundler(self.ui, stream)
198 198 bundle2.processbundle(self._repo, b)
199 199 raise
200 200 except error.PushRaced as exc:
201 201 raise error.ResponseError(_('push failed:'), str(exc))
202 202
203 203 def lock(self):
204 204 return self._repo.lock()
205 205
206 206 def addchangegroup(self, cg, source, url):
207 207 return cg.apply(self._repo, source, url)
208 208
209 209 def pushkey(self, namespace, key, old, new):
210 210 return self._repo.pushkey(namespace, key, old, new)
211 211
212 212 def listkeys(self, namespace):
213 213 return self._repo.listkeys(namespace)
214 214
215 215 def debugwireargs(self, one, two, three=None, four=None, five=None):
216 216 '''used to test argument passing over the wire'''
217 217 return "%s %s %s %s %s" % (one, two, three, four, five)
218 218
219 219 class locallegacypeer(localpeer):
220 220 '''peer extension which implements legacy methods too; used for tests with
221 221 restricted capabilities'''
222 222
223 223 def __init__(self, repo):
224 224 localpeer.__init__(self, repo, caps=legacycaps)
225 225
226 226 def branches(self, nodes):
227 227 return self._repo.branches(nodes)
228 228
229 229 def between(self, pairs):
230 230 return self._repo.between(pairs)
231 231
232 232 def changegroup(self, basenodes, source):
233 233 return changegroup.changegroup(self._repo, basenodes, source)
234 234
235 235 def changegroupsubset(self, bases, heads, source):
236 236 return changegroup.changegroupsubset(self._repo, bases, heads, source)
237 237
238 238 class localrepository(object):
239 239
240 240 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
241 241 'manifestv2'))
242 242 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
243 243 'dotencode'))
244 244 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
245 245 filtername = None
246 246
247 247 # a list of (ui, featureset) functions.
248 248 # only functions defined in module of enabled extensions are invoked
249 249 featuresetupfuncs = set()
250 250
251 251 def __init__(self, baseui, path=None, create=False):
252 252 self.requirements = set()
253 253 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
254 254 self.wopener = self.wvfs
255 255 self.root = self.wvfs.base
256 256 self.path = self.wvfs.join(".hg")
257 257 self.origroot = path
258 258 self.auditor = pathutil.pathauditor(self.root, self._checknested)
259 259 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
260 260 realfs=False)
261 261 self.vfs = scmutil.vfs(self.path)
262 262 self.opener = self.vfs
263 263 self.baseui = baseui
264 264 self.ui = baseui.copy()
265 265 self.ui.copy = baseui.copy # prevent copying repo configuration
266 266 # A list of callbacks to shape the phase if no data were found.
267 267 # Callbacks are in the form: func(repo, roots) --> processed root.
268 268 # This list is to be filled by extensions during repo setup.
269 269 self._phasedefaults = []
270 270 try:
271 271 self.ui.readconfig(self.join("hgrc"), self.root)
272 272 extensions.loadall(self.ui)
273 273 except IOError:
274 274 pass
275 275
276 276 if self.featuresetupfuncs:
277 277 self.supported = set(self._basesupported) # use private copy
278 278 extmods = set(m.__name__ for n, m
279 279 in extensions.extensions(self.ui))
280 280 for setupfunc in self.featuresetupfuncs:
281 281 if setupfunc.__module__ in extmods:
282 282 setupfunc(self.ui, self.supported)
283 283 else:
284 284 self.supported = self._basesupported
285 285
286 286 if not self.vfs.isdir():
287 287 if create:
288 288 self.requirements = newreporequirements(self)
289 289
290 290 if not self.wvfs.exists():
291 291 self.wvfs.makedirs()
292 292 self.vfs.makedir(notindexed=True)
293 293
294 294 if 'store' in self.requirements:
295 295 self.vfs.mkdir("store")
296 296
297 297 # create an invalid changelog
298 298 self.vfs.append(
299 299 "00changelog.i",
300 300 '\0\0\0\2' # represents revlogv2
301 301 ' dummy changelog to prevent using the old repo layout'
302 302 )
303 303 else:
304 304 raise error.RepoError(_("repository %s not found") % path)
305 305 elif create:
306 306 raise error.RepoError(_("repository %s already exists") % path)
307 307 else:
308 308 try:
309 309 self.requirements = scmutil.readrequires(
310 310 self.vfs, self.supported)
311 311 except IOError as inst:
312 312 if inst.errno != errno.ENOENT:
313 313 raise
314 314
315 315 self.sharedpath = self.path
316 316 try:
317 317 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
318 318 realpath=True)
319 319 s = vfs.base
320 320 if not vfs.exists():
321 321 raise error.RepoError(
322 322 _('.hg/sharedpath points to nonexistent directory %s') % s)
323 323 self.sharedpath = s
324 324 except IOError as inst:
325 325 if inst.errno != errno.ENOENT:
326 326 raise
327 327
328 328 self.store = store.store(
329 329 self.requirements, self.sharedpath, scmutil.vfs)
330 330 self.spath = self.store.path
331 331 self.svfs = self.store.vfs
332 332 self.sjoin = self.store.join
333 333 self.vfs.createmode = self.store.createmode
334 334 self._applyopenerreqs()
335 335 if create:
336 336 self._writerequirements()
337 337
338 338 self._dirstatevalidatewarned = False
339 339
340 340 self._branchcaches = {}
341 341 self._revbranchcache = None
342 342 self.filterpats = {}
343 343 self._datafilters = {}
344 344 self._transref = self._lockref = self._wlockref = None
345 345
346 346 # A cache for various files under .hg/ that tracks file changes,
347 347 # (used by the filecache decorator)
348 348 #
349 349 # Maps a property name to its util.filecacheentry
350 350 self._filecache = {}
351 351
352 352 # hold sets of revision to be filtered
353 353 # should be cleared when something might have changed the filter value:
354 354 # - new changesets,
355 355 # - phase change,
356 356 # - new obsolescence marker,
357 357 # - working directory parent change,
358 358 # - bookmark changes
359 359 self.filteredrevcache = {}
360 360
361 361 # generic mapping between names and nodes
362 362 self.names = namespaces.namespaces()
363 363
364 364 def close(self):
365 365 self._writecaches()
366 366
367 367 def _writecaches(self):
368 368 if self._revbranchcache:
369 369 self._revbranchcache.write()
370 370
371 371 def _restrictcapabilities(self, caps):
372 372 if self.ui.configbool('experimental', 'bundle2-advertise', True):
373 373 caps = set(caps)
374 374 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
375 375 caps.add('bundle2=' + urlreq.quote(capsblob))
376 376 return caps
377 377
378 378 def _applyopenerreqs(self):
379 379 self.svfs.options = dict((r, 1) for r in self.requirements
380 380 if r in self.openerreqs)
381 381 # experimental config: format.chunkcachesize
382 382 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
383 383 if chunkcachesize is not None:
384 384 self.svfs.options['chunkcachesize'] = chunkcachesize
385 385 # experimental config: format.maxchainlen
386 386 maxchainlen = self.ui.configint('format', 'maxchainlen')
387 387 if maxchainlen is not None:
388 388 self.svfs.options['maxchainlen'] = maxchainlen
389 389 # experimental config: format.manifestcachesize
390 390 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
391 391 if manifestcachesize is not None:
392 392 self.svfs.options['manifestcachesize'] = manifestcachesize
393 393 # experimental config: format.aggressivemergedeltas
394 394 aggressivemergedeltas = self.ui.configbool('format',
395 395 'aggressivemergedeltas', False)
396 396 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
397 397 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
398 398
399 399 def _writerequirements(self):
400 400 scmutil.writerequires(self.vfs, self.requirements)
401 401
402 402 def _checknested(self, path):
403 403 """Determine if path is a legal nested repository."""
404 404 if not path.startswith(self.root):
405 405 return False
406 406 subpath = path[len(self.root) + 1:]
407 407 normsubpath = util.pconvert(subpath)
408 408
409 409 # XXX: Checking against the current working copy is wrong in
410 410 # the sense that it can reject things like
411 411 #
412 412 # $ hg cat -r 10 sub/x.txt
413 413 #
414 414 # if sub/ is no longer a subrepository in the working copy
415 415 # parent revision.
416 416 #
417 417 # However, it can of course also allow things that would have
418 418 # been rejected before, such as the above cat command if sub/
419 419 # is a subrepository now, but was a normal directory before.
420 420 # The old path auditor would have rejected by mistake since it
421 421 # panics when it sees sub/.hg/.
422 422 #
423 423 # All in all, checking against the working copy seems sensible
424 424 # since we want to prevent access to nested repositories on
425 425 # the filesystem *now*.
426 426 ctx = self[None]
427 427 parts = util.splitpath(subpath)
428 428 while parts:
429 429 prefix = '/'.join(parts)
430 430 if prefix in ctx.substate:
431 431 if prefix == normsubpath:
432 432 return True
433 433 else:
434 434 sub = ctx.sub(prefix)
435 435 return sub.checknested(subpath[len(prefix) + 1:])
436 436 else:
437 437 parts.pop()
438 438 return False
439 439
440 440 def peer(self):
441 441 return localpeer(self) # not cached to avoid reference cycle
442 442
443 443 def unfiltered(self):
444 444 """Return unfiltered version of the repository
445 445
446 446 Intended to be overridden by filtered repos."""
447 447 return self
448 448
449 449 def filtered(self, name):
450 450 """Return a filtered version of a repository"""
451 451 # build a new class with the mixin and the current class
452 452 # (possibly subclass of the repo)
453 453 class proxycls(repoview.repoview, self.unfiltered().__class__):
454 454 pass
455 455 return proxycls(self, name)
456 456
457 457 @repofilecache('bookmarks', 'bookmarks.current')
458 458 def _bookmarks(self):
459 459 return bookmarks.bmstore(self)
460 460
461 461 @property
462 462 def _activebookmark(self):
463 463 return self._bookmarks.active
464 464
465 465 def bookmarkheads(self, bookmark):
466 466 name = bookmark.split('@', 1)[0]
467 467 heads = []
468 468 for mark, n in self._bookmarks.iteritems():
469 469 if mark.split('@', 1)[0] == name:
470 470 heads.append(n)
471 471 return heads
472 472
473 473 # _phaserevs and _phasesets depend on changelog. what we need is to
474 474 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
475 475 # can't be easily expressed in filecache mechanism.
476 476 @storecache('phaseroots', '00changelog.i')
477 477 def _phasecache(self):
478 478 return phases.phasecache(self, self._phasedefaults)
479 479
480 480 @storecache('obsstore')
481 481 def obsstore(self):
482 482 # read default format for new obsstore.
483 483 # developer config: format.obsstore-version
484 484 defaultformat = self.ui.configint('format', 'obsstore-version', None)
485 485 # rely on obsstore class default when possible.
486 486 kwargs = {}
487 487 if defaultformat is not None:
488 488 kwargs['defaultformat'] = defaultformat
489 489 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
490 490 store = obsolete.obsstore(self.svfs, readonly=readonly,
491 491 **kwargs)
492 492 if store and readonly:
493 493 self.ui.warn(
494 494 _('obsolete feature not enabled but %i markers found!\n')
495 495 % len(list(store)))
496 496 return store
497 497
498 498 @storecache('00changelog.i')
499 499 def changelog(self):
500 500 c = changelog.changelog(self.svfs)
501 501 if 'HG_PENDING' in os.environ:
502 502 p = os.environ['HG_PENDING']
503 503 if p.startswith(self.root):
504 504 c.readpending('00changelog.i.a')
505 505 return c
506 506
507 507 @storecache('00manifest.i')
508 508 def manifest(self):
509 return self._constructmanifest()
510
511 def _constructmanifest(self):
512 # This is a temporary function while we migrate from manifest to
513 # manifestlog. It allows bundlerepo and unionrepo to intercept the
514 # manifest creation.
509 515 return manifest.manifest(self.svfs)
510 516
511 517 @property
512 518 def manifestlog(self):
513 519 return manifest.manifestlog(self.svfs, self)
514 520
515 521 @repofilecache('dirstate')
516 522 def dirstate(self):
517 523 return dirstate.dirstate(self.vfs, self.ui, self.root,
518 524 self._dirstatevalidate)
519 525
520 526 def _dirstatevalidate(self, node):
521 527 try:
522 528 self.changelog.rev(node)
523 529 return node
524 530 except error.LookupError:
525 531 if not self._dirstatevalidatewarned:
526 532 self._dirstatevalidatewarned = True
527 533 self.ui.warn(_("warning: ignoring unknown"
528 534 " working parent %s!\n") % short(node))
529 535 return nullid
530 536
531 537 def __getitem__(self, changeid):
532 538 if changeid is None or changeid == wdirrev:
533 539 return context.workingctx(self)
534 540 if isinstance(changeid, slice):
535 541 return [context.changectx(self, i)
536 542 for i in xrange(*changeid.indices(len(self)))
537 543 if i not in self.changelog.filteredrevs]
538 544 return context.changectx(self, changeid)
539 545
540 546 def __contains__(self, changeid):
541 547 try:
542 548 self[changeid]
543 549 return True
544 550 except error.RepoLookupError:
545 551 return False
546 552
547 553 def __nonzero__(self):
548 554 return True
549 555
550 556 def __len__(self):
551 557 return len(self.changelog)
552 558
553 559 def __iter__(self):
554 560 return iter(self.changelog)
555 561
556 562 def revs(self, expr, *args):
557 563 '''Find revisions matching a revset.
558 564
559 565 The revset is specified as a string ``expr`` that may contain
560 566 %-formatting to escape certain types. See ``revset.formatspec``.
561 567
562 568 Revset aliases from the configuration are not expanded. To expand
563 569 user aliases, consider calling ``scmutil.revrange()``.
564 570
565 571 Returns a revset.abstractsmartset, which is a list-like interface
566 572 that contains integer revisions.
567 573 '''
568 574 expr = revset.formatspec(expr, *args)
569 575 m = revset.match(None, expr)
570 576 return m(self)
571 577
572 578 def set(self, expr, *args):
573 579 '''Find revisions matching a revset and emit changectx instances.
574 580
575 581 This is a convenience wrapper around ``revs()`` that iterates the
576 582 result and is a generator of changectx instances.
577 583
578 584 Revset aliases from the configuration are not expanded. To expand
579 585 user aliases, consider calling ``scmutil.revrange()``.
580 586 '''
581 587 for r in self.revs(expr, *args):
582 588 yield self[r]
583 589
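A short usage sketch for revs() and set() as documented above; the revset expressions are ordinary revset syntax and the repo object is assumed to exist already:

for rev in repo.revs('ancestors(%s) and not public()', 'tip'):
    print(rev)  # integer revision numbers

for ctx in repo.set('heads(all())'):
    print(ctx.hex(), ctx.branch())  # changectx instances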
584 590 def url(self):
585 591 return 'file:' + self.root
586 592
587 593 def hook(self, name, throw=False, **args):
588 594 """Call a hook, passing this repo instance.
589 595
590 596 This is a convenience method to aid invoking hooks. Extensions likely
591 597 won't call this unless they have registered a custom hook or are
592 598 replacing code that is expected to call a hook.
593 599 """
594 600 return hook.hook(self.ui, self, name, throw, **args)
595 601
596 602 @unfilteredmethod
597 603 def _tag(self, names, node, message, local, user, date, extra=None,
598 604 editor=False):
599 605 if isinstance(names, str):
600 606 names = (names,)
601 607
602 608 branches = self.branchmap()
603 609 for name in names:
604 610 self.hook('pretag', throw=True, node=hex(node), tag=name,
605 611 local=local)
606 612 if name in branches:
607 613 self.ui.warn(_("warning: tag %s conflicts with existing"
608 614 " branch name\n") % name)
609 615
610 616 def writetags(fp, names, munge, prevtags):
611 617 fp.seek(0, 2)
612 618 if prevtags and prevtags[-1] != '\n':
613 619 fp.write('\n')
614 620 for name in names:
615 621 if munge:
616 622 m = munge(name)
617 623 else:
618 624 m = name
619 625
620 626 if (self._tagscache.tagtypes and
621 627 name in self._tagscache.tagtypes):
622 628 old = self.tags().get(name, nullid)
623 629 fp.write('%s %s\n' % (hex(old), m))
624 630 fp.write('%s %s\n' % (hex(node), m))
625 631 fp.close()
626 632
627 633 prevtags = ''
628 634 if local:
629 635 try:
630 636 fp = self.vfs('localtags', 'r+')
631 637 except IOError:
632 638 fp = self.vfs('localtags', 'a')
633 639 else:
634 640 prevtags = fp.read()
635 641
636 642 # local tags are stored in the current charset
637 643 writetags(fp, names, None, prevtags)
638 644 for name in names:
639 645 self.hook('tag', node=hex(node), tag=name, local=local)
640 646 return
641 647
642 648 try:
643 649 fp = self.wfile('.hgtags', 'rb+')
644 650 except IOError as e:
645 651 if e.errno != errno.ENOENT:
646 652 raise
647 653 fp = self.wfile('.hgtags', 'ab')
648 654 else:
649 655 prevtags = fp.read()
650 656
651 657 # committed tags are stored in UTF-8
652 658 writetags(fp, names, encoding.fromlocal, prevtags)
653 659
654 660 fp.close()
655 661
656 662 self.invalidatecaches()
657 663
658 664 if '.hgtags' not in self.dirstate:
659 665 self[None].add(['.hgtags'])
660 666
661 667 m = matchmod.exact(self.root, '', ['.hgtags'])
662 668 tagnode = self.commit(message, user, date, extra=extra, match=m,
663 669 editor=editor)
664 670
665 671 for name in names:
666 672 self.hook('tag', node=hex(node), tag=name, local=local)
667 673
668 674 return tagnode
669 675
670 676 def tag(self, names, node, message, local, user, date, editor=False):
671 677 '''tag a revision with one or more symbolic names.
672 678
673 679 names is a list of strings or, when adding a single tag, names may be a
674 680 string.
675 681
676 682 if local is True, the tags are stored in a per-repository file.
677 683 otherwise, they are stored in the .hgtags file, and a new
678 684 changeset is committed with the change.
679 685
680 686 keyword arguments:
681 687
682 688 local: whether to store tags in non-version-controlled file
683 689 (default False)
684 690
685 691 message: commit message to use if committing
686 692
687 693 user: name of user to use if committing
688 694
689 695 date: date tuple to use if committing'''
690 696
691 697 if not local:
692 698 m = matchmod.exact(self.root, '', ['.hgtags'])
693 699 if any(self.status(match=m, unknown=True, ignored=True)):
694 700 raise error.Abort(_('working copy of .hgtags is changed'),
695 701 hint=_('please commit .hgtags manually'))
696 702
697 703 self.tags() # instantiate the cache
698 704 self._tag(names, node, message, local, user, date, editor=editor)
699 705
700 706 @filteredpropertycache
701 707 def _tagscache(self):
702 708 '''Returns a tagscache object that contains various tag-related
703 709 caches.'''
704 710
705 711 # This simplifies its cache management by having one decorated
706 712 # function (this one) and the rest simply fetch things from it.
707 713 class tagscache(object):
708 714 def __init__(self):
709 715 # These two define the set of tags for this repository. tags
710 716 # maps tag name to node; tagtypes maps tag name to 'global' or
711 717 # 'local'. (Global tags are defined by .hgtags across all
712 718 # heads, and local tags are defined in .hg/localtags.)
713 719 # They constitute the in-memory cache of tags.
714 720 self.tags = self.tagtypes = None
715 721
716 722 self.nodetagscache = self.tagslist = None
717 723
718 724 cache = tagscache()
719 725 cache.tags, cache.tagtypes = self._findtags()
720 726
721 727 return cache
722 728
723 729 def tags(self):
724 730 '''return a mapping of tag to node'''
725 731 t = {}
726 732 if self.changelog.filteredrevs:
727 733 tags, tt = self._findtags()
728 734 else:
729 735 tags = self._tagscache.tags
730 736 for k, v in tags.iteritems():
731 737 try:
732 738 # ignore tags to unknown nodes
733 739 self.changelog.rev(v)
734 740 t[k] = v
735 741 except (error.LookupError, ValueError):
736 742 pass
737 743 return t
738 744
739 745 def _findtags(self):
740 746 '''Do the hard work of finding tags. Return a pair of dicts
741 747 (tags, tagtypes) where tags maps tag name to node, and tagtypes
742 748 maps tag name to a string like \'global\' or \'local\'.
743 749 Subclasses or extensions are free to add their own tags, but
744 750 should be aware that the returned dicts will be retained for the
745 751 duration of the localrepo object.'''
746 752
747 753 # XXX what tagtype should subclasses/extensions use? Currently
748 754 # mq and bookmarks add tags, but do not set the tagtype at all.
749 755 # Should each extension invent its own tag type? Should there
750 756 # be one tagtype for all such "virtual" tags? Or is the status
751 757 # quo fine?
752 758
753 759 alltags = {} # map tag name to (node, hist)
754 760 tagtypes = {}
755 761
756 762 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
757 763 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
758 764
759 765 # Build the return dicts. Have to re-encode tag names because
760 766 # the tags module always uses UTF-8 (in order not to lose info
761 767 # writing to the cache), but the rest of Mercurial wants them in
762 768 # local encoding.
763 769 tags = {}
764 770 for (name, (node, hist)) in alltags.iteritems():
765 771 if node != nullid:
766 772 tags[encoding.tolocal(name)] = node
767 773 tags['tip'] = self.changelog.tip()
768 774 tagtypes = dict([(encoding.tolocal(name), value)
769 775 for (name, value) in tagtypes.iteritems()])
770 776 return (tags, tagtypes)
771 777
772 778 def tagtype(self, tagname):
773 779 '''
774 780 return the type of the given tag. result can be:
775 781
776 782 'local' : a local tag
777 783 'global' : a global tag
778 784 None : tag does not exist
779 785 '''
780 786
781 787 return self._tagscache.tagtypes.get(tagname)
782 788
783 789 def tagslist(self):
784 790 '''return a list of tags ordered by revision'''
785 791 if not self._tagscache.tagslist:
786 792 l = []
787 793 for t, n in self.tags().iteritems():
788 794 l.append((self.changelog.rev(n), t, n))
789 795 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
790 796
791 797 return self._tagscache.tagslist
792 798
793 799 def nodetags(self, node):
794 800 '''return the tags associated with a node'''
795 801 if not self._tagscache.nodetagscache:
796 802 nodetagscache = {}
797 803 for t, n in self._tagscache.tags.iteritems():
798 804 nodetagscache.setdefault(n, []).append(t)
799 805 for tags in nodetagscache.itervalues():
800 806 tags.sort()
801 807 self._tagscache.nodetagscache = nodetagscache
802 808 return self._tagscache.nodetagscache.get(node, [])
803 809
804 810 def nodebookmarks(self, node):
805 811 """return the list of bookmarks pointing to the specified node"""
806 812 marks = []
807 813 for bookmark, n in self._bookmarks.iteritems():
808 814 if n == node:
809 815 marks.append(bookmark)
810 816 return sorted(marks)
811 817
812 818 def branchmap(self):
813 819 '''returns a dictionary {branch: [branchheads]} with branchheads
814 820 ordered by increasing revision number'''
815 821 branchmap.updatecache(self)
816 822 return self._branchcaches[self.filtername]
817 823
818 824 @unfilteredmethod
819 825 def revbranchcache(self):
820 826 if not self._revbranchcache:
821 827 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
822 828 return self._revbranchcache
823 829
824 830 def branchtip(self, branch, ignoremissing=False):
825 831 '''return the tip node for a given branch
826 832
827 833 If ignoremissing is True, then this method will not raise an error.
828 834 This is helpful for callers that only expect None for a missing branch
829 835 (e.g. namespace).
830 836
831 837 '''
832 838 try:
833 839 return self.branchmap().branchtip(branch)
834 840 except KeyError:
835 841 if not ignoremissing:
836 842 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
837 843 else:
838 844 pass
839 845
840 846 def lookup(self, key):
841 847 return self[key].node()
842 848
843 849 def lookupbranch(self, key, remote=None):
844 850 repo = remote or self
845 851 if key in repo.branchmap():
846 852 return key
847 853
848 854 repo = (remote and remote.local()) and remote or self
849 855 return repo[key].branch()
850 856
851 857 def known(self, nodes):
852 858 cl = self.changelog
853 859 nm = cl.nodemap
854 860 filtered = cl.filteredrevs
855 861 result = []
856 862 for n in nodes:
857 863 r = nm.get(n)
858 864 resp = not (r is None or r in filtered)
859 865 result.append(resp)
860 866 return result
861 867
862 868 def local(self):
863 869 return self
864 870
865 871 def publishing(self):
866 872 # it's safe (and desirable) to trust the publish flag unconditionally
867 873 # so that we don't finalize changes shared between users via ssh or nfs
868 874 return self.ui.configbool('phases', 'publish', True, untrusted=True)
869 875
870 876 def cancopy(self):
871 877 # so statichttprepo's override of local() works
872 878 if not self.local():
873 879 return False
874 880 if not self.publishing():
875 881 return True
876 882 # if publishing we can't copy if there is filtered content
877 883 return not self.filtered('visible').changelog.filteredrevs
878 884
879 885 def shared(self):
880 886 '''the type of shared repository (None if not shared)'''
881 887 if self.sharedpath != self.path:
882 888 return 'store'
883 889 return None
884 890
885 891 def join(self, f, *insidef):
886 892 return self.vfs.join(os.path.join(f, *insidef))
887 893
888 894 def wjoin(self, f, *insidef):
889 895 return self.vfs.reljoin(self.root, f, *insidef)
890 896
891 897 def file(self, f):
892 898 if f[0] == '/':
893 899 f = f[1:]
894 900 return filelog.filelog(self.svfs, f)
895 901
896 902 def changectx(self, changeid):
897 903 return self[changeid]
898 904
899 905 def setparents(self, p1, p2=nullid):
900 906 self.dirstate.beginparentchange()
901 907 copies = self.dirstate.setparents(p1, p2)
902 908 pctx = self[p1]
903 909 if copies:
904 910 # Adjust copy records, the dirstate cannot do it, it
905 911 # requires access to parents manifests. Preserve them
906 912 # only for entries added to first parent.
907 913 for f in copies:
908 914 if f not in pctx and copies[f] in pctx:
909 915 self.dirstate.copy(copies[f], f)
910 916 if p2 == nullid:
911 917 for f, s in sorted(self.dirstate.copies().items()):
912 918 if f not in pctx and s not in pctx:
913 919 self.dirstate.copy(None, f)
914 920 self.dirstate.endparentchange()
915 921
916 922 def filectx(self, path, changeid=None, fileid=None):
917 923 """changeid can be a changeset revision, node, or tag.
918 924 fileid can be a file revision or node."""
919 925 return context.filectx(self, path, changeid, fileid)
920 926
921 927 def getcwd(self):
922 928 return self.dirstate.getcwd()
923 929
924 930 def pathto(self, f, cwd=None):
925 931 return self.dirstate.pathto(f, cwd)
926 932
927 933 def wfile(self, f, mode='r'):
928 934 return self.wvfs(f, mode)
929 935
930 936 def _link(self, f):
931 937 return self.wvfs.islink(f)
932 938
933 939 def _loadfilter(self, filter):
934 940 if filter not in self.filterpats:
935 941 l = []
936 942 for pat, cmd in self.ui.configitems(filter):
937 943 if cmd == '!':
938 944 continue
939 945 mf = matchmod.match(self.root, '', [pat])
940 946 fn = None
941 947 params = cmd
942 948 for name, filterfn in self._datafilters.iteritems():
943 949 if cmd.startswith(name):
944 950 fn = filterfn
945 951 params = cmd[len(name):].lstrip()
946 952 break
947 953 if not fn:
948 954 fn = lambda s, c, **kwargs: util.filter(s, c)
949 955 # Wrap old filters not supporting keyword arguments
950 956 if not inspect.getargspec(fn)[2]:
951 957 oldfn = fn
952 958 fn = lambda s, c, **kwargs: oldfn(s, c)
953 959 l.append((mf, fn, params))
954 960 self.filterpats[filter] = l
955 961 return self.filterpats[filter]
956 962
957 963 def _filter(self, filterpats, filename, data):
958 964 for mf, fn, cmd in filterpats:
959 965 if mf(filename):
960 966 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
961 967 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
962 968 break
963 969
964 970 return data
965 971
966 972 @unfilteredpropertycache
967 973 def _encodefilterpats(self):
968 974 return self._loadfilter('encode')
969 975
970 976 @unfilteredpropertycache
971 977 def _decodefilterpats(self):
972 978 return self._loadfilter('decode')
973 979
974 980 def adddatafilter(self, name, filter):
975 981 self._datafilters[name] = filter
976 982
977 983 def wread(self, filename):
978 984 if self._link(filename):
979 985 data = self.wvfs.readlink(filename)
980 986 else:
981 987 data = self.wvfs.read(filename)
982 988 return self._filter(self._encodefilterpats, filename, data)
983 989
984 990 def wwrite(self, filename, data, flags, backgroundclose=False):
985 991 """write ``data`` into ``filename`` in the working directory
986 992
987 993 This returns the length of the written (maybe decoded) data.
988 994 """
989 995 data = self._filter(self._decodefilterpats, filename, data)
990 996 if 'l' in flags:
991 997 self.wvfs.symlink(data, filename)
992 998 else:
993 999 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
994 1000 if 'x' in flags:
995 1001 self.wvfs.setflags(filename, False, True)
996 1002 return len(data)
997 1003
998 1004 def wwritedata(self, filename, data):
999 1005 return self._filter(self._decodefilterpats, filename, data)
1000 1006
1001 1007 def currenttransaction(self):
1002 1008 """return the current transaction or None if none exists"""
1003 1009 if self._transref:
1004 1010 tr = self._transref()
1005 1011 else:
1006 1012 tr = None
1007 1013
1008 1014 if tr and tr.running():
1009 1015 return tr
1010 1016 return None
1011 1017
1012 1018 def transaction(self, desc, report=None):
1013 1019 if (self.ui.configbool('devel', 'all-warnings')
1014 1020 or self.ui.configbool('devel', 'check-locks')):
1015 1021 if self._currentlock(self._lockref) is None:
1016 1022 raise RuntimeError('programming error: transaction requires '
1017 1023 'locking')
1018 1024 tr = self.currenttransaction()
1019 1025 if tr is not None:
1020 1026 return tr.nest()
1021 1027
1022 1028 # abort here if the journal already exists
1023 1029 if self.svfs.exists("journal"):
1024 1030 raise error.RepoError(
1025 1031 _("abandoned transaction found"),
1026 1032 hint=_("run 'hg recover' to clean up transaction"))
1027 1033
1028 1034 idbase = "%.40f#%f" % (random.random(), time.time())
1029 1035 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1030 1036 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1031 1037
1032 1038 self._writejournal(desc)
1033 1039 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1034 1040 if report:
1035 1041 rp = report
1036 1042 else:
1037 1043 rp = self.ui.warn
1038 1044 vfsmap = {'plain': self.vfs} # root of .hg/
1039 1045 # we must avoid cyclic reference between repo and transaction.
1040 1046 reporef = weakref.ref(self)
1041 1047 def validate(tr):
1042 1048 """will run pre-closing hooks"""
1043 1049 reporef().hook('pretxnclose', throw=True,
1044 1050 txnname=desc, **tr.hookargs)
1045 1051 def releasefn(tr, success):
1046 1052 repo = reporef()
1047 1053 if success:
1048 1054 # this should be explicitly invoked here, because
1049 1055 # in-memory changes aren't written out when the
1050 1056 # transaction closes, if tr.addfilegenerator (via
1051 1057 # dirstate.write or so) wasn't invoked while the
1052 1058 # transaction was running
1053 1059 repo.dirstate.write(None)
1054 1060 else:
1055 1061 # discard all changes (including ones already written
1056 1062 # out) in this transaction
1057 1063 repo.dirstate.restorebackup(None, prefix='journal.')
1058 1064
1059 1065 repo.invalidate(clearfilecache=True)
1060 1066
1061 1067 tr = transaction.transaction(rp, self.svfs, vfsmap,
1062 1068 "journal",
1063 1069 "undo",
1064 1070 aftertrans(renames),
1065 1071 self.store.createmode,
1066 1072 validator=validate,
1067 1073 releasefn=releasefn)
1068 1074
1069 1075 tr.hookargs['txnid'] = txnid
1070 1076 # note: writing the fncache only during finalize means that the file is
1071 1077 # outdated when running hooks. As the fncache is used for streaming clones,
1072 1078 # this is not expected to break anything that happens during the hooks.
1073 1079 tr.addfinalize('flush-fncache', self.store.write)
1074 1080 def txnclosehook(tr2):
1075 1081 """To be run if transaction is successful, will schedule a hook run
1076 1082 """
1077 1083 # Don't reference tr2 in hook() so we don't hold a reference.
1078 1084 # This reduces memory consumption when there are multiple
1079 1085 # transactions per lock. This can likely go away if issue5045
1080 1086 # fixes the function accumulation.
1081 1087 hookargs = tr2.hookargs
1082 1088
1083 1089 def hook():
1084 1090 reporef().hook('txnclose', throw=False, txnname=desc,
1085 1091 **hookargs)
1086 1092 reporef()._afterlock(hook)
1087 1093 tr.addfinalize('txnclose-hook', txnclosehook)
1088 1094 def txnaborthook(tr2):
1089 1095 """To be run if transaction is aborted
1090 1096 """
1091 1097 reporef().hook('txnabort', throw=False, txnname=desc,
1092 1098 **tr2.hookargs)
1093 1099 tr.addabort('txnabort-hook', txnaborthook)
1094 1100 # avoid eager cache invalidation. in-memory data should be identical
1095 1101 # to stored data if the transaction has no error.
1096 1102 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1097 1103 self._transref = weakref.ref(tr)
1098 1104 return tr
1099 1105
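The reporef = weakref.ref(self) indirection above is what the 'avoid cyclic reference' comment refers to: the transaction's validator, release and hook callbacks would otherwise close over the repository and keep the repo <-> transaction cycle alive. A minimal sketch of the pattern outside Mercurial:

    # Minimal sketch: callbacks dereference a weakref instead of
    # capturing the owner directly, so no reference cycle is created.
    import weakref

    class Repo(object):
        def maketransactioncallback(self):
            reporef = weakref.ref(self)       # weak, not self
            def callback():
                repo = reporef()
                if repo is not None:          # repo may be gone already
                    repo.hookran = True
            return callback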
1100 1106 def _journalfiles(self):
1101 1107 return ((self.svfs, 'journal'),
1102 1108 (self.vfs, 'journal.dirstate'),
1103 1109 (self.vfs, 'journal.branch'),
1104 1110 (self.vfs, 'journal.desc'),
1105 1111 (self.vfs, 'journal.bookmarks'),
1106 1112 (self.svfs, 'journal.phaseroots'))
1107 1113
1108 1114 def undofiles(self):
1109 1115 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1110 1116
1111 1117 def _writejournal(self, desc):
1112 1118 self.dirstate.savebackup(None, prefix='journal.')
1113 1119 self.vfs.write("journal.branch",
1114 1120 encoding.fromlocal(self.dirstate.branch()))
1115 1121 self.vfs.write("journal.desc",
1116 1122 "%d\n%s\n" % (len(self), desc))
1117 1123 self.vfs.write("journal.bookmarks",
1118 1124 self.vfs.tryread("bookmarks"))
1119 1125 self.svfs.write("journal.phaseroots",
1120 1126 self.svfs.tryread("phaseroots"))
1121 1127
1122 1128 def recover(self):
1123 1129 with self.lock():
1124 1130 if self.svfs.exists("journal"):
1125 1131 self.ui.status(_("rolling back interrupted transaction\n"))
1126 1132 vfsmap = {'': self.svfs,
1127 1133 'plain': self.vfs,}
1128 1134 transaction.rollback(self.svfs, vfsmap, "journal",
1129 1135 self.ui.warn)
1130 1136 self.invalidate()
1131 1137 return True
1132 1138 else:
1133 1139 self.ui.warn(_("no interrupted transaction available\n"))
1134 1140 return False
1135 1141
1136 1142 def rollback(self, dryrun=False, force=False):
1137 1143 wlock = lock = dsguard = None
1138 1144 try:
1139 1145 wlock = self.wlock()
1140 1146 lock = self.lock()
1141 1147 if self.svfs.exists("undo"):
1142 1148 dsguard = cmdutil.dirstateguard(self, 'rollback')
1143 1149
1144 1150 return self._rollback(dryrun, force, dsguard)
1145 1151 else:
1146 1152 self.ui.warn(_("no rollback information available\n"))
1147 1153 return 1
1148 1154 finally:
1149 1155 release(dsguard, lock, wlock)
1150 1156
1151 1157 @unfilteredmethod # Until we get smarter cache management
1152 1158 def _rollback(self, dryrun, force, dsguard):
1153 1159 ui = self.ui
1154 1160 try:
1155 1161 args = self.vfs.read('undo.desc').splitlines()
1156 1162 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1157 1163 if len(args) >= 3:
1158 1164 detail = args[2]
1159 1165 oldtip = oldlen - 1
1160 1166
1161 1167 if detail and ui.verbose:
1162 1168 msg = (_('repository tip rolled back to revision %s'
1163 1169 ' (undo %s: %s)\n')
1164 1170 % (oldtip, desc, detail))
1165 1171 else:
1166 1172 msg = (_('repository tip rolled back to revision %s'
1167 1173 ' (undo %s)\n')
1168 1174 % (oldtip, desc))
1169 1175 except IOError:
1170 1176 msg = _('rolling back unknown transaction\n')
1171 1177 desc = None
1172 1178
1173 1179 if not force and self['.'] != self['tip'] and desc == 'commit':
1174 1180 raise error.Abort(
1175 1181 _('rollback of last commit while not checked out '
1176 1182 'may lose data'), hint=_('use -f to force'))
1177 1183
1178 1184 ui.status(msg)
1179 1185 if dryrun:
1180 1186 return 0
1181 1187
1182 1188 parents = self.dirstate.parents()
1183 1189 self.destroying()
1184 1190 vfsmap = {'plain': self.vfs, '': self.svfs}
1185 1191 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1186 1192 if self.vfs.exists('undo.bookmarks'):
1187 1193 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1188 1194 if self.svfs.exists('undo.phaseroots'):
1189 1195 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1190 1196 self.invalidate()
1191 1197
1192 1198 parentgone = (parents[0] not in self.changelog.nodemap or
1193 1199 parents[1] not in self.changelog.nodemap)
1194 1200 if parentgone:
1195 1201 # prevent dirstateguard from overwriting already restored one
1196 1202 dsguard.close()
1197 1203
1198 1204 self.dirstate.restorebackup(None, prefix='undo.')
1199 1205 try:
1200 1206 branch = self.vfs.read('undo.branch')
1201 1207 self.dirstate.setbranch(encoding.tolocal(branch))
1202 1208 except IOError:
1203 1209 ui.warn(_('named branch could not be reset: '
1204 1210 'current branch is still \'%s\'\n')
1205 1211 % self.dirstate.branch())
1206 1212
1207 1213 parents = tuple([p.rev() for p in self[None].parents()])
1208 1214 if len(parents) > 1:
1209 1215 ui.status(_('working directory now based on '
1210 1216 'revisions %d and %d\n') % parents)
1211 1217 else:
1212 1218 ui.status(_('working directory now based on '
1213 1219 'revision %d\n') % parents)
1214 1220 mergemod.mergestate.clean(self, self['.'].node())
1215 1221
1216 1222 # TODO: if we know which new heads may result from this rollback, pass
1217 1223 # them to destroy(), which will prevent the branchhead cache from being
1218 1224 # invalidated.
1219 1225 self.destroyed()
1220 1226 return 0
1221 1227
1222 1228 def invalidatecaches(self):
1223 1229
1224 1230 if '_tagscache' in vars(self):
1225 1231 # can't use delattr on proxy
1226 1232 del self.__dict__['_tagscache']
1227 1233
1228 1234 self.unfiltered()._branchcaches.clear()
1229 1235 self.invalidatevolatilesets()
1230 1236
1231 1237 def invalidatevolatilesets(self):
1232 1238 self.filteredrevcache.clear()
1233 1239 obsolete.clearobscaches(self)
1234 1240
1235 1241 def invalidatedirstate(self):
1236 1242 '''Invalidates the dirstate, causing the next call to dirstate
1237 1243 to check if it was modified since the last time it was read,
1238 1244 rereading it if it has.
1239 1245
1240 1246 This differs from dirstate.invalidate() in that it doesn't always
1241 1247 reread the dirstate. Use dirstate.invalidate() if you want to
1242 1248 explicitly reread the dirstate (i.e. restore it to a previously
1243 1249 known good state).'''
1244 1250 if hasunfilteredcache(self, 'dirstate'):
1245 1251 for k in self.dirstate._filecache:
1246 1252 try:
1247 1253 delattr(self.dirstate, k)
1248 1254 except AttributeError:
1249 1255 pass
1250 1256 delattr(self.unfiltered(), 'dirstate')
1251 1257
1252 1258 def invalidate(self, clearfilecache=False):
1253 1259 '''Invalidates both store and non-store parts other than dirstate
1254 1260
1255 1261 If a transaction is running, invalidation of store is omitted,
1256 1262 because discarding in-memory changes might cause inconsistency
1257 1263 (e.g. an incomplete fncache causes unintentional failure, but
1258 1264 a redundant one doesn't).
1259 1265 '''
1260 1266 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1261 1267 for k in self._filecache.keys():
1262 1268 # dirstate is invalidated separately in invalidatedirstate()
1263 1269 if k == 'dirstate':
1264 1270 continue
1265 1271
1266 1272 if clearfilecache:
1267 1273 del self._filecache[k]
1268 1274 try:
1269 1275 delattr(unfiltered, k)
1270 1276 except AttributeError:
1271 1277 pass
1272 1278 self.invalidatecaches()
1273 1279 if not self.currenttransaction():
1274 1280 # TODO: Changing contents of store outside transaction
1275 1281 # causes inconsistency. We should make in-memory store
1276 1282 # changes detectable, and abort if changed.
1277 1283 self.store.invalidatecaches()
1278 1284
1279 1285 def invalidateall(self):
1280 1286 '''Fully invalidates both store and non-store parts, causing the
1281 1287 subsequent operation to reread any outside changes.'''
1282 1288 # extension should hook this to invalidate its caches
1283 1289 self.invalidate()
1284 1290 self.invalidatedirstate()
1285 1291
1286 1292 @unfilteredmethod
1287 1293 def _refreshfilecachestats(self, tr):
1288 1294 """Reload stats of cached files so that they are flagged as valid"""
1289 1295 for k, ce in self._filecache.items():
1290 1296 if k == 'dirstate' or k not in self.__dict__:
1291 1297 continue
1292 1298 ce.refresh()
1293 1299
1294 1300 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1295 1301 inheritchecker=None, parentenvvar=None):
1296 1302 parentlock = None
1297 1303 # the contents of parentenvvar are used by the underlying lock to
1298 1304 # determine whether it can be inherited
1299 1305 if parentenvvar is not None:
1300 1306 parentlock = os.environ.get(parentenvvar)
1301 1307 try:
1302 1308 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1303 1309 acquirefn=acquirefn, desc=desc,
1304 1310 inheritchecker=inheritchecker,
1305 1311 parentlock=parentlock)
1306 1312 except error.LockHeld as inst:
1307 1313 if not wait:
1308 1314 raise
1309 1315 # show more details for new-style locks
1310 1316 if ':' in inst.locker:
1311 1317 host, pid = inst.locker.split(":", 1)
1312 1318 self.ui.warn(
1313 1319 _("waiting for lock on %s held by process %r "
1314 1320 "on host %r\n") % (desc, pid, host))
1315 1321 else:
1316 1322 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1317 1323 (desc, inst.locker))
1318 1324 # default to 600 seconds timeout
1319 1325 l = lockmod.lock(vfs, lockname,
1320 1326 int(self.ui.config("ui", "timeout", "600")),
1321 1327 releasefn=releasefn, acquirefn=acquirefn,
1322 1328 desc=desc)
1323 1329 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1324 1330 return l
1325 1331
1326 1332 def _afterlock(self, callback):
1327 1333 """add a callback to be run when the repository is fully unlocked
1328 1334
1329 1335 The callback will be executed when the outermost lock is released
1330 1336 (with wlock being higher level than 'lock')."""
1331 1337 for ref in (self._wlockref, self._lockref):
1332 1338 l = ref and ref()
1333 1339 if l and l.held:
1334 1340 l.postrelease.append(callback)
1335 1341 break
1336 1342 else: # no lock has been found.
1337 1343 callback()
1338 1344
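A hedged usage sketch for _afterlock, assuming a localrepository instance named repo: if some lock is held, the callback lands on the outermost lock's postrelease list and runs on final unlock; otherwise it runs immediately. The callback itself is illustrative only:

    # Illustrative only: defer work until the repo is fully unlocked,
    # the same way commit() schedules its 'commit' hook below.
    def afterunlock():
        repo.ui.debug('repository fully unlocked\n')

    repo._afterlock(afterunlock)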
1339 1345 def lock(self, wait=True):
1340 1346 '''Lock the repository store (.hg/store) and return a weak reference
1341 1347 to the lock. Use this before modifying the store (e.g. committing or
1342 1348 stripping). If you are opening a transaction, get a lock as well.
1343 1349
1344 1350 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1345 1351 'wlock' first to avoid a deadlock hazard.'''
1346 1352 l = self._currentlock(self._lockref)
1347 1353 if l is not None:
1348 1354 l.lock()
1349 1355 return l
1350 1356
1351 1357 l = self._lock(self.svfs, "lock", wait, None,
1352 1358 self.invalidate, _('repository %s') % self.origroot)
1353 1359 self._lockref = weakref.ref(l)
1354 1360 return l
1355 1361
1356 1362 def _wlockchecktransaction(self):
1357 1363 if self.currenttransaction() is not None:
1358 1364 raise error.LockInheritanceContractViolation(
1359 1365 'wlock cannot be inherited in the middle of a transaction')
1360 1366
1361 1367 def wlock(self, wait=True):
1362 1368 '''Lock the non-store parts of the repository (everything under
1363 1369 .hg except .hg/store) and return a weak reference to the lock.
1364 1370
1365 1371 Use this before modifying files in .hg.
1366 1372
1367 1373 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1368 1374 'wlock' first to avoid a deadlock hazard.'''
1369 1375 l = self._wlockref and self._wlockref()
1370 1376 if l is not None and l.held:
1371 1377 l.lock()
1372 1378 return l
1373 1379
1374 1380 # We do not need to check for non-waiting lock acquisition. Such
1375 1381 # acquisition would not cause a deadlock, as it would just fail.
1376 1382 if wait and (self.ui.configbool('devel', 'all-warnings')
1377 1383 or self.ui.configbool('devel', 'check-locks')):
1378 1384 if self._currentlock(self._lockref) is not None:
1379 1385 self.ui.develwarn('"wlock" acquired after "lock"')
1380 1386
1381 1387 def unlock():
1382 1388 if self.dirstate.pendingparentchange():
1383 1389 self.dirstate.invalidate()
1384 1390 else:
1385 1391 self.dirstate.write(None)
1386 1392
1387 1393 self._filecache['dirstate'].refresh()
1388 1394
1389 1395 l = self._lock(self.vfs, "wlock", wait, unlock,
1390 1396 self.invalidatedirstate, _('working directory of %s') %
1391 1397 self.origroot,
1392 1398 inheritchecker=self._wlockchecktransaction,
1393 1399 parentenvvar='HG_WLOCK_LOCKER')
1394 1400 self._wlockref = weakref.ref(l)
1395 1401 return l
1396 1402
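Per the docstrings above, code needing both locks must take 'wlock' before 'lock'; the devel warning in wlock() fires on the opposite order. A typical acquisition pattern, mirroring rollback() earlier in this file (repo is an assumed localrepository instance; release() is the lock-release helper this module already imports):

    # Take wlock first, then lock; release in reverse order.
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # ... modify the store and working copy ...
    finally:
        release(lock, wlock)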
1397 1403 def _currentlock(self, lockref):
1398 1404 """Returns the lock if it's held, or None if it's not."""
1399 1405 if lockref is None:
1400 1406 return None
1401 1407 l = lockref()
1402 1408 if l is None or not l.held:
1403 1409 return None
1404 1410 return l
1405 1411
1406 1412 def currentwlock(self):
1407 1413 """Returns the wlock if it's held, or None if it's not."""
1408 1414 return self._currentlock(self._wlockref)
1409 1415
1410 1416 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1411 1417 """
1412 1418 commit an individual file as part of a larger transaction
1413 1419 """
1414 1420
1415 1421 fname = fctx.path()
1416 1422 fparent1 = manifest1.get(fname, nullid)
1417 1423 fparent2 = manifest2.get(fname, nullid)
1418 1424 if isinstance(fctx, context.filectx):
1419 1425 node = fctx.filenode()
1420 1426 if node in [fparent1, fparent2]:
1421 1427 self.ui.debug('reusing %s filelog entry\n' % fname)
1422 1428 if manifest1.flags(fname) != fctx.flags():
1423 1429 changelist.append(fname)
1424 1430 return node
1425 1431
1426 1432 flog = self.file(fname)
1427 1433 meta = {}
1428 1434 copy = fctx.renamed()
1429 1435 if copy and copy[0] != fname:
1430 1436 # Mark the new revision of this file as a copy of another
1431 1437 # file. This copy data will effectively act as a parent
1432 1438 # of this new revision. If this is a merge, the first
1433 1439 # parent will be the nullid (meaning "look up the copy data")
1434 1440 # and the second one will be the other parent. For example:
1435 1441 #
1436 1442 # 0 --- 1 --- 3 rev1 changes file foo
1437 1443 # \ / rev2 renames foo to bar and changes it
1438 1444 # \- 2 -/ rev3 should have bar with all changes and
1439 1445 # should record that bar descends from
1440 1446 # bar in rev2 and foo in rev1
1441 1447 #
1442 1448 # this allows this merge to succeed:
1443 1449 #
1444 1450 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1445 1451 # \ / merging rev3 and rev4 should use bar@rev2
1446 1452 # \- 2 --- 4 as the merge base
1447 1453 #
1448 1454
1449 1455 cfname = copy[0]
1450 1456 crev = manifest1.get(cfname)
1451 1457 newfparent = fparent2
1452 1458
1453 1459 if manifest2: # branch merge
1454 1460 if fparent2 == nullid or crev is None: # copied on remote side
1455 1461 if cfname in manifest2:
1456 1462 crev = manifest2[cfname]
1457 1463 newfparent = fparent1
1458 1464
1459 1465 # Here, we used to search backwards through history to try to find
1460 1466 # where the file copy came from if the source of a copy was not in
1461 1467 # the parent directory. However, this doesn't actually make sense to
1462 1468 # do (what does a copy from something not in your working copy even
1463 1469 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1464 1470 # the user that copy information was dropped, so if they didn't
1465 1471 # expect this outcome it can be fixed, but this is the correct
1466 1472 # behavior in this circumstance.
1467 1473
1468 1474 if crev:
1469 1475 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1470 1476 meta["copy"] = cfname
1471 1477 meta["copyrev"] = hex(crev)
1472 1478 fparent1, fparent2 = nullid, newfparent
1473 1479 else:
1474 1480 self.ui.warn(_("warning: can't find ancestor for '%s' "
1475 1481 "copied from '%s'!\n") % (fname, cfname))
1476 1482
1477 1483 elif fparent1 == nullid:
1478 1484 fparent1, fparent2 = fparent2, nullid
1479 1485 elif fparent2 != nullid:
1480 1486 # is one parent an ancestor of the other?
1481 1487 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1482 1488 if fparent1 in fparentancestors:
1483 1489 fparent1, fparent2 = fparent2, nullid
1484 1490 elif fparent2 in fparentancestors:
1485 1491 fparent2 = nullid
1486 1492
1487 1493 # is the file changed?
1488 1494 text = fctx.data()
1489 1495 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1490 1496 changelist.append(fname)
1491 1497 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1492 1498 # are just the flags changed during merge?
1493 1499 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1494 1500 changelist.append(fname)
1495 1501
1496 1502 return fparent1
1497 1503
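The net result of the rename handling above is a small metadata dict stored alongside the new filelog revision, with the copy source acting as a virtual parent. The values below are illustrative only:

    # Illustrative shape of the copy metadata _filecommit records:
    meta = {
        'copy': 'foo',        # path the file was copied/renamed from
        'copyrev': 'a' * 40,  # hex filenode of the source revision
    }
    # fparent1 is then set to nullid so readers know to consult the
    # copy metadata, and newfparent becomes the second parent.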
1498 1504 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1499 1505 """check for commit arguments that aren't commitable"""
1500 1506 if match.isexact() or match.prefix():
1501 1507 matched = set(status.modified + status.added + status.removed)
1502 1508
1503 1509 for f in match.files():
1504 1510 f = self.dirstate.normalize(f)
1505 1511 if f == '.' or f in matched or f in wctx.substate:
1506 1512 continue
1507 1513 if f in status.deleted:
1508 1514 fail(f, _('file not found!'))
1509 1515 if f in vdirs: # visited directory
1510 1516 d = f + '/'
1511 1517 for mf in matched:
1512 1518 if mf.startswith(d):
1513 1519 break
1514 1520 else:
1515 1521 fail(f, _("no match under directory!"))
1516 1522 elif f not in self.dirstate:
1517 1523 fail(f, _("file not tracked!"))
1518 1524
1519 1525 @unfilteredmethod
1520 1526 def commit(self, text="", user=None, date=None, match=None, force=False,
1521 1527 editor=False, extra=None):
1522 1528 """Add a new revision to current repository.
1523 1529
1524 1530 Revision information is gathered from the working directory,
1525 1531 match can be used to filter the committed files. If editor is
1526 1532 supplied, it is called to get a commit message.
1527 1533 """
1528 1534 if extra is None:
1529 1535 extra = {}
1530 1536
1531 1537 def fail(f, msg):
1532 1538 raise error.Abort('%s: %s' % (f, msg))
1533 1539
1534 1540 if not match:
1535 1541 match = matchmod.always(self.root, '')
1536 1542
1537 1543 if not force:
1538 1544 vdirs = []
1539 1545 match.explicitdir = vdirs.append
1540 1546 match.bad = fail
1541 1547
1542 1548 wlock = lock = tr = None
1543 1549 try:
1544 1550 wlock = self.wlock()
1545 1551 lock = self.lock() # for recent changelog (see issue4368)
1546 1552
1547 1553 wctx = self[None]
1548 1554 merge = len(wctx.parents()) > 1
1549 1555
1550 1556 if not force and merge and match.ispartial():
1551 1557 raise error.Abort(_('cannot partially commit a merge '
1552 1558 '(do not specify files or patterns)'))
1553 1559
1554 1560 status = self.status(match=match, clean=force)
1555 1561 if force:
1556 1562 status.modified.extend(status.clean) # mq may commit clean files
1557 1563
1558 1564 # check subrepos
1559 1565 subs = []
1560 1566 commitsubs = set()
1561 1567 newstate = wctx.substate.copy()
1562 1568 # only manage subrepos and .hgsubstate if .hgsub is present
1563 1569 if '.hgsub' in wctx:
1564 1570 # we'll decide whether to track this ourselves, thanks
1565 1571 for c in status.modified, status.added, status.removed:
1566 1572 if '.hgsubstate' in c:
1567 1573 c.remove('.hgsubstate')
1568 1574
1569 1575 # compare current state to last committed state
1570 1576 # build new substate based on last committed state
1571 1577 oldstate = wctx.p1().substate
1572 1578 for s in sorted(newstate.keys()):
1573 1579 if not match(s):
1574 1580 # ignore working copy, use old state if present
1575 1581 if s in oldstate:
1576 1582 newstate[s] = oldstate[s]
1577 1583 continue
1578 1584 if not force:
1579 1585 raise error.Abort(
1580 1586 _("commit with new subrepo %s excluded") % s)
1581 1587 dirtyreason = wctx.sub(s).dirtyreason(True)
1582 1588 if dirtyreason:
1583 1589 if not self.ui.configbool('ui', 'commitsubrepos'):
1584 1590 raise error.Abort(dirtyreason,
1585 1591 hint=_("use --subrepos for recursive commit"))
1586 1592 subs.append(s)
1587 1593 commitsubs.add(s)
1588 1594 else:
1589 1595 bs = wctx.sub(s).basestate()
1590 1596 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1591 1597 if oldstate.get(s, (None, None, None))[1] != bs:
1592 1598 subs.append(s)
1593 1599
1594 1600 # check for removed subrepos
1595 1601 for p in wctx.parents():
1596 1602 r = [s for s in p.substate if s not in newstate]
1597 1603 subs += [s for s in r if match(s)]
1598 1604 if subs:
1599 1605 if (not match('.hgsub') and
1600 1606 '.hgsub' in (wctx.modified() + wctx.added())):
1601 1607 raise error.Abort(
1602 1608 _("can't commit subrepos without .hgsub"))
1603 1609 status.modified.insert(0, '.hgsubstate')
1604 1610
1605 1611 elif '.hgsub' in status.removed:
1606 1612 # clean up .hgsubstate when .hgsub is removed
1607 1613 if ('.hgsubstate' in wctx and
1608 1614 '.hgsubstate' not in (status.modified + status.added +
1609 1615 status.removed)):
1610 1616 status.removed.insert(0, '.hgsubstate')
1611 1617
1612 1618 # make sure all explicit patterns are matched
1613 1619 if not force:
1614 1620 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1615 1621
1616 1622 cctx = context.workingcommitctx(self, status,
1617 1623 text, user, date, extra)
1618 1624
1619 1625 # internal config: ui.allowemptycommit
1620 1626 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1621 1627 or extra.get('close') or merge or cctx.files()
1622 1628 or self.ui.configbool('ui', 'allowemptycommit'))
1623 1629 if not allowemptycommit:
1624 1630 return None
1625 1631
1626 1632 if merge and cctx.deleted():
1627 1633 raise error.Abort(_("cannot commit merge with missing files"))
1628 1634
1629 1635 ms = mergemod.mergestate.read(self)
1630 1636
1631 1637 if list(ms.unresolved()):
1632 1638 raise error.Abort(_("unresolved merge conflicts "
1633 1639 "(see 'hg help resolve')"))
1634 1640 if ms.mdstate() != 's' or list(ms.driverresolved()):
1635 1641 raise error.Abort(_('driver-resolved merge conflicts'),
1636 1642 hint=_('run "hg resolve --all" to resolve'))
1637 1643
1638 1644 if editor:
1639 1645 cctx._text = editor(self, cctx, subs)
1640 1646 edited = (text != cctx._text)
1641 1647
1642 1648 # Save commit message in case this transaction gets rolled back
1643 1649 # (e.g. by a pretxncommit hook). Leave the content alone on
1644 1650 # the assumption that the user will use the same editor again.
1645 1651 msgfn = self.savecommitmessage(cctx._text)
1646 1652
1647 1653 # commit subs and write new state
1648 1654 if subs:
1649 1655 for s in sorted(commitsubs):
1650 1656 sub = wctx.sub(s)
1651 1657 self.ui.status(_('committing subrepository %s\n') %
1652 1658 subrepo.subrelpath(sub))
1653 1659 sr = sub.commit(cctx._text, user, date)
1654 1660 newstate[s] = (newstate[s][0], sr)
1655 1661 subrepo.writestate(self, newstate)
1656 1662
1657 1663 p1, p2 = self.dirstate.parents()
1658 1664 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1659 1665 try:
1660 1666 self.hook("precommit", throw=True, parent1=hookp1,
1661 1667 parent2=hookp2)
1662 1668 tr = self.transaction('commit')
1663 1669 ret = self.commitctx(cctx, True)
1664 1670 except: # re-raises
1665 1671 if edited:
1666 1672 self.ui.write(
1667 1673 _('note: commit message saved in %s\n') % msgfn)
1668 1674 raise
1669 1675 # update bookmarks, dirstate and mergestate
1670 1676 bookmarks.update(self, [p1, p2], ret)
1671 1677 cctx.markcommitted(ret)
1672 1678 ms.reset()
1673 1679 tr.close()
1674 1680
1675 1681 finally:
1676 1682 lockmod.release(tr, lock, wlock)
1677 1683
1678 1684 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1679 1685 # hack for commands that use a temporary commit (e.g. histedit):
1680 1686 # the temporary commit may already have been stripped when the hook runs
1681 1687 if self.changelog.hasnode(ret):
1682 1688 self.hook("commit", node=node, parent1=parent1,
1683 1689 parent2=parent2)
1684 1690 self._afterlock(commithook)
1685 1691 return ret
1686 1692
1687 1693 @unfilteredmethod
1688 1694 def commitctx(self, ctx, error=False):
1689 1695 """Add a new revision to current repository.
1690 1696 Revision information is passed via the context argument.
1691 1697 """
1692 1698
1693 1699 tr = None
1694 1700 p1, p2 = ctx.p1(), ctx.p2()
1695 1701 user = ctx.user()
1696 1702
1697 1703 lock = self.lock()
1698 1704 try:
1699 1705 tr = self.transaction("commit")
1700 1706 trp = weakref.proxy(tr)
1701 1707
1702 1708 if ctx.files():
1703 1709 m1 = p1.manifest()
1704 1710 m2 = p2.manifest()
1705 1711 m = m1.copy()
1706 1712
1707 1713 # check in files
1708 1714 added = []
1709 1715 changed = []
1710 1716 removed = list(ctx.removed())
1711 1717 linkrev = len(self)
1712 1718 self.ui.note(_("committing files:\n"))
1713 1719 for f in sorted(ctx.modified() + ctx.added()):
1714 1720 self.ui.note(f + "\n")
1715 1721 try:
1716 1722 fctx = ctx[f]
1717 1723 if fctx is None:
1718 1724 removed.append(f)
1719 1725 else:
1720 1726 added.append(f)
1721 1727 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1722 1728 trp, changed)
1723 1729 m.setflag(f, fctx.flags())
1724 1730 except OSError as inst:
1725 1731 self.ui.warn(_("trouble committing %s!\n") % f)
1726 1732 raise
1727 1733 except IOError as inst:
1728 1734 errcode = getattr(inst, 'errno', errno.ENOENT)
1729 1735 if error or errcode and errcode != errno.ENOENT:
1730 1736 self.ui.warn(_("trouble committing %s!\n") % f)
1731 1737 raise
1732 1738
1733 1739 # update manifest
1734 1740 self.ui.note(_("committing manifest\n"))
1735 1741 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1736 1742 drop = [f for f in removed if f in m]
1737 1743 for f in drop:
1738 1744 del m[f]
1739 1745 mn = self.manifestlog.add(m, trp, linkrev,
1740 1746 p1.manifestnode(), p2.manifestnode(),
1741 1747 added, drop)
1742 1748 files = changed + removed
1743 1749 else:
1744 1750 mn = p1.manifestnode()
1745 1751 files = []
1746 1752
1747 1753 # update changelog
1748 1754 self.ui.note(_("committing changelog\n"))
1749 1755 self.changelog.delayupdate(tr)
1750 1756 n = self.changelog.add(mn, files, ctx.description(),
1751 1757 trp, p1.node(), p2.node(),
1752 1758 user, ctx.date(), ctx.extra().copy())
1753 1759 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1754 1760 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1755 1761 parent2=xp2)
1756 1762 # set the new commit in its proper phase
1757 1763 targetphase = subrepo.newcommitphase(self.ui, ctx)
1758 1764 if targetphase:
1759 1765 # retracting the boundary does not alter parent changesets.
1760 1766 # if a parent has a higher phase, the resulting phase will
1761 1767 # be compliant anyway
1762 1768 #
1763 1769 # if the minimal phase was 0 we don't need to retract anything
1764 1770 phases.retractboundary(self, tr, targetphase, [n])
1765 1771 tr.close()
1766 1772 branchmap.updatecache(self.filtered('served'))
1767 1773 return n
1768 1774 finally:
1769 1775 if tr:
1770 1776 tr.release()
1771 1777 lock.release()
1772 1778
1773 1779 @unfilteredmethod
1774 1780 def destroying(self):
1775 1781 '''Inform the repository that nodes are about to be destroyed.
1776 1782 Intended for use by strip and rollback, so there's a common
1777 1783 place for anything that has to be done before destroying history.
1778 1784
1779 1785 This is mostly useful for saving state that is in memory and waiting
1780 1786 to be flushed when the current lock is released. Because a call to
1781 1787 destroyed is imminent, the repo will be invalidated causing those
1782 1788 changes to stay in memory (waiting for the next unlock), or vanish
1783 1789 completely.
1784 1790 '''
1785 1791 # When using the same lock to commit and strip, the phasecache is left
1786 1792 # dirty after committing. Then when we strip, the repo is invalidated,
1787 1793 # causing those changes to disappear.
1788 1794 if '_phasecache' in vars(self):
1789 1795 self._phasecache.write()
1790 1796
1791 1797 @unfilteredmethod
1792 1798 def destroyed(self):
1793 1799 '''Inform the repository that nodes have been destroyed.
1794 1800 Intended for use by strip and rollback, so there's a common
1795 1801 place for anything that has to be done after destroying history.
1796 1802 '''
1797 1803 # When one tries to:
1798 1804 # 1) destroy nodes thus calling this method (e.g. strip)
1799 1805 # 2) use phasecache somewhere (e.g. commit)
1800 1806 #
1801 1807 # then 2) will fail because the phasecache contains nodes that were
1802 1808 # removed. We can either remove phasecache from the filecache,
1803 1809 # causing it to reload next time it is accessed, or simply filter
1804 1810 # the removed nodes now and write the updated cache.
1805 1811 self._phasecache.filterunknown(self)
1806 1812 self._phasecache.write()
1807 1813
1808 1814 # update the 'served' branch cache to help read only server process
1809 1815 # Thanks to branchcache collaboration this is done from the nearest
1810 1816 # filtered subset and it is expected to be fast.
1811 1817 branchmap.updatecache(self.filtered('served'))
1812 1818
1813 1819 # Ensure the persistent tag cache is updated. Doing it now
1814 1820 # means that the tag cache only has to worry about destroyed
1815 1821 # heads immediately after a strip/rollback. That in turn
1816 1822 # guarantees that "cachetip == currenttip" (comparing both rev
1817 1823 # and node) always means no nodes have been added or destroyed.
1818 1824
1819 1825 # XXX this is suboptimal when qrefresh'ing: we strip the current
1820 1826 # head, refresh the tag cache, then immediately add a new head.
1821 1827 # But I think doing it this way is necessary for the "instant
1822 1828 # tag cache retrieval" case to work.
1823 1829 self.invalidate()
1824 1830
1825 1831 def walk(self, match, node=None):
1826 1832 '''
1827 1833 walk recursively through the directory tree or a given
1828 1834 changeset, finding all files matched by the match
1829 1835 function
1830 1836 '''
1831 1837 return self[node].walk(match)
1832 1838
1833 1839 def status(self, node1='.', node2=None, match=None,
1834 1840 ignored=False, clean=False, unknown=False,
1835 1841 listsubrepos=False):
1836 1842 '''a convenience method that calls node1.status(node2)'''
1837 1843 return self[node1].status(node2, match, ignored, clean, unknown,
1838 1844 listsubrepos)
1839 1845
1840 1846 def heads(self, start=None):
1841 1847 heads = self.changelog.heads(start)
1842 1848 # sort the output in rev descending order
1843 1849 return sorted(heads, key=self.changelog.rev, reverse=True)
1844 1850
1845 1851 def branchheads(self, branch=None, start=None, closed=False):
1846 1852 '''return a (possibly filtered) list of heads for the given branch
1847 1853
1848 1854 Heads are returned in topological order, from newest to oldest.
1849 1855 If branch is None, use the dirstate branch.
1850 1856 If start is not None, return only heads reachable from start.
1851 1857 If closed is True, return heads that are marked as closed as well.
1852 1858 '''
1853 1859 if branch is None:
1854 1860 branch = self[None].branch()
1855 1861 branches = self.branchmap()
1856 1862 if branch not in branches:
1857 1863 return []
1858 1864 # the cache returns heads ordered lowest to highest
1859 1865 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1860 1866 if start is not None:
1861 1867 # filter out the heads that cannot be reached from startrev
1862 1868 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1863 1869 bheads = [h for h in bheads if h in fbheads]
1864 1870 return bheads
1865 1871
1866 1872 def branches(self, nodes):
1867 1873 if not nodes:
1868 1874 nodes = [self.changelog.tip()]
1869 1875 b = []
1870 1876 for n in nodes:
1871 1877 t = n
1872 1878 while True:
1873 1879 p = self.changelog.parents(n)
1874 1880 if p[1] != nullid or p[0] == nullid:
1875 1881 b.append((t, n, p[0], p[1]))
1876 1882 break
1877 1883 n = p[0]
1878 1884 return b
1879 1885
1880 1886 def between(self, pairs):
1881 1887 r = []
1882 1888
1883 1889 for top, bottom in pairs:
1884 1890 n, l, i = top, [], 0
1885 1891 f = 1
1886 1892
1887 1893 while n != bottom and n != nullid:
1888 1894 p = self.changelog.parents(n)[0]
1889 1895 if i == f:
1890 1896 l.append(n)
1891 1897 f = f * 2
1892 1898 n = p
1893 1899 i += 1
1894 1900
1895 1901 r.append(l)
1896 1902
1897 1903 return r
1898 1904
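between() walks the first-parent chain from each top toward bottom and records nodes at exponentially growing distances (1, 2, 4, 8, ... steps below top), which lets the discovery protocol narrow a range with few round trips. A worked version of the same loop on plain integers, where the first parent of n is n - 1:

    # Worked sketch of the sampling loop with integer "nodes".
    def between_ints(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = n - 1                 # first parent
            if i == f:                # record at distances 1, 2, 4, ...
                l.append(n)
                f = f * 2
            n = p
            i += 1
        return l

    assert between_ints(9, 0) == [8, 7, 5, 1]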
1899 1905 def checkpush(self, pushop):
1900 1906 """Extensions can override this function if additional checks have
1901 1907 to be performed before pushing, or call it if they override push
1902 1908 command.
1903 1909 """
1904 1910 pass
1905 1911
1906 1912 @unfilteredpropertycache
1907 1913 def prepushoutgoinghooks(self):
1908 1914 """Return util.hooks consists of a pushop with repo, remote, outgoing
1909 1915 methods, which are called before pushing changesets.
1910 1916 """
1911 1917 return util.hooks()
1912 1918
1913 1919 def pushkey(self, namespace, key, old, new):
1914 1920 try:
1915 1921 tr = self.currenttransaction()
1916 1922 hookargs = {}
1917 1923 if tr is not None:
1918 1924 hookargs.update(tr.hookargs)
1919 1925 hookargs['namespace'] = namespace
1920 1926 hookargs['key'] = key
1921 1927 hookargs['old'] = old
1922 1928 hookargs['new'] = new
1923 1929 self.hook('prepushkey', throw=True, **hookargs)
1924 1930 except error.HookAbort as exc:
1925 1931 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1926 1932 if exc.hint:
1927 1933 self.ui.write_err(_("(%s)\n") % exc.hint)
1928 1934 return False
1929 1935 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1930 1936 ret = pushkey.push(self, namespace, key, old, new)
1931 1937 def runhook():
1932 1938 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1933 1939 ret=ret)
1934 1940 self._afterlock(runhook)
1935 1941 return ret
1936 1942
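pushkey() and listkeys() above implement the generic namespaced key/value protocol that bookmarks and phases ride on. A hedged round-trip sketch over the 'bookmarks' namespace (repo is an assumed localrepository instance; key and node values are illustrative):

    # Illustrative pushkey round trip over the 'bookmarks' namespace.
    values = repo.listkeys('bookmarks')        # {name: hex node}
    old = values.get('stable', '')
    ok = repo.pushkey('bookmarks', 'stable', old, 'f' * 40)
    # ok is truthy when the conditional update succeeded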
1937 1943 def listkeys(self, namespace):
1938 1944 self.hook('prelistkeys', throw=True, namespace=namespace)
1939 1945 self.ui.debug('listing keys for "%s"\n' % namespace)
1940 1946 values = pushkey.list(self, namespace)
1941 1947 self.hook('listkeys', namespace=namespace, values=values)
1942 1948 return values
1943 1949
1944 1950 def debugwireargs(self, one, two, three=None, four=None, five=None):
1945 1951 '''used to test argument passing over the wire'''
1946 1952 return "%s %s %s %s %s" % (one, two, three, four, five)
1947 1953
1948 1954 def savecommitmessage(self, text):
1949 1955 fp = self.vfs('last-message.txt', 'wb')
1950 1956 try:
1951 1957 fp.write(text)
1952 1958 finally:
1953 1959 fp.close()
1954 1960 return self.pathto(fp.name[len(self.root) + 1:])
1955 1961
1956 1962 # used to avoid circular references so destructors work
1957 1963 def aftertrans(files):
1958 1964 renamefiles = [tuple(t) for t in files]
1959 1965 def a():
1960 1966 for vfs, src, dest in renamefiles:
1961 1967 try:
1962 1968 vfs.rename(src, dest)
1963 1969 except OSError: # journal file does not yet exist
1964 1970 pass
1965 1971 return a
1966 1972
1967 1973 def undoname(fn):
1968 1974 base, name = os.path.split(fn)
1969 1975 assert name.startswith('journal')
1970 1976 return os.path.join(base, name.replace('journal', 'undo', 1))
1971 1977
1972 1978 def instance(ui, path, create):
1973 1979 return localrepository(ui, util.urllocalpath(path), create)
1974 1980
1975 1981 def islocal(path):
1976 1982 return True
1977 1983
1978 1984 def newreporequirements(repo):
1979 1985 """Determine the set of requirements for a new local repository.
1980 1986
1981 1987 Extensions can wrap this function to specify custom requirements for
1982 1988 new repositories.
1983 1989 """
1984 1990 ui = repo.ui
1985 1991 requirements = set(['revlogv1'])
1986 1992 if ui.configbool('format', 'usestore', True):
1987 1993 requirements.add('store')
1988 1994 if ui.configbool('format', 'usefncache', True):
1989 1995 requirements.add('fncache')
1990 1996 if ui.configbool('format', 'dotencode', True):
1991 1997 requirements.add('dotencode')
1992 1998
1993 1999 if scmutil.gdinitconfig(ui):
1994 2000 requirements.add('generaldelta')
1995 2001 if ui.configbool('experimental', 'treemanifest', False):
1996 2002 requirements.add('treemanifest')
1997 2003 if ui.configbool('experimental', 'manifestv2', False):
1998 2004 requirements.add('manifestv2')
1999 2005
2000 2006 return requirements
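As the docstring says, extensions can wrap newreporequirements to add custom requirements. A hedged sketch of such a wrapper using extensions.wrapfunction (a standard Mercurial extension mechanism); the config knob and requirement name here are hypothetical:

    # In a hypothetical extension module:
    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        requirements = orig(repo)
        if repo.ui.configbool('myext', 'enabled', False):
            requirements.add('exp-myext-storage')   # hypothetical name
        return requirements

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)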
@@ -1,262 +1,261 b''
1 1 # unionrepo.py - repository class for viewing union of repository changesets
2 2 #
3 3 # Derived from bundlerepo.py
4 4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
5 5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Repository class for "in-memory pull" of one local repository to another,
11 11 allowing operations like diff and log with revsets.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import os
17 17
18 18 from .i18n import _
19 19 from .node import nullid
20 20
21 21 from . import (
22 22 changelog,
23 23 cmdutil,
24 24 error,
25 25 filelog,
26 26 localrepo,
27 27 manifest,
28 28 mdiff,
29 29 pathutil,
30 30 revlog,
31 31 scmutil,
32 32 util,
33 33 )
34 34
35 35 class unionrevlog(revlog.revlog):
36 36 def __init__(self, opener, indexfile, revlog2, linkmapper):
37 37 # How it works:
38 38 # To retrieve a revision, we just need to know the node id so we can
39 39 # look it up in revlog2.
40 40 #
41 41 # To differentiate a rev in the second revlog from a rev in the revlog,
42 42 # we check revision against repotiprev.
43 43 opener = scmutil.readonlyvfs(opener)
44 44 revlog.revlog.__init__(self, opener, indexfile)
45 45 self.revlog2 = revlog2
46 46
47 47 n = len(self)
48 48 self.repotiprev = n - 1
49 49 self.bundlerevs = set() # used by 'bundle()' revset expression
50 50 for rev2 in self.revlog2:
51 51 rev = self.revlog2.index[rev2]
52 52 # rev numbers - in revlog2, very different from self.rev
53 53 _start, _csize, _rsize, base, linkrev, p1rev, p2rev, node = rev
54 54 flags = _start & 0xFFFF
55 55
56 56 if linkmapper is None: # link is to same revlog
57 57 assert linkrev == rev2 # we never link back
58 58 link = n
59 59 else: # rev must be mapped from repo2 cl to unified cl by linkmapper
60 60 link = linkmapper(linkrev)
61 61
62 62 if linkmapper is not None: # base rev must be mapped too
63 63 base = linkmapper(base)
64 64
65 65 if node in self.nodemap:
66 66 # this happens for the common revlog revisions
67 67 self.bundlerevs.add(self.nodemap[node])
68 68 continue
69 69
70 70 p1node = self.revlog2.node(p1rev)
71 71 p2node = self.revlog2.node(p2rev)
72 72
73 73 e = (flags, None, None, base,
74 74 link, self.rev(p1node), self.rev(p2node), node)
75 75 self.index.insert(-1, e)
76 76 self.nodemap[node] = n
77 77 self.bundlerevs.add(n)
78 78 n += 1
79 79
80 80 def _chunk(self, rev):
81 81 if rev <= self.repotiprev:
82 82 return revlog.revlog._chunk(self, rev)
83 83 return self.revlog2._chunk(self.node(rev))
84 84
85 85 def revdiff(self, rev1, rev2):
86 86 """return or calculate a delta between two revisions"""
87 87 if rev1 > self.repotiprev and rev2 > self.repotiprev:
88 88 return self.revlog2.revdiff(
89 89 self.revlog2.rev(self.node(rev1)),
90 90 self.revlog2.rev(self.node(rev2)))
91 91 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
92 92 return self.baserevdiff(rev1, rev2)
93 93
94 94 return mdiff.textdiff(self.revision(self.node(rev1)),
95 95 self.revision(self.node(rev2)))
96 96
97 97 def revision(self, nodeorrev):
98 98 """return an uncompressed revision of a given node or revision
99 99 number.
100 100 """
101 101 if isinstance(nodeorrev, int):
102 102 rev = nodeorrev
103 103 node = self.node(rev)
104 104 else:
105 105 node = nodeorrev
106 106 rev = self.rev(node)
107 107
108 108 if node == nullid:
109 109 return ""
110 110
111 111 if rev > self.repotiprev:
112 112 text = self.revlog2.revision(node)
113 113 self._cache = (node, rev, text)
114 114 else:
115 115 text = self.baserevision(rev)
116 116 # already cached
117 117 return text
118 118
119 119 def baserevision(self, nodeorrev):
120 120 # Revlog subclasses may override the 'revision' method to modify the format
121 121 # of content retrieved from the revlog. To use unionrevlog with such a class,
122 122 # one needs to override 'baserevision' and make a more specific call here.
123 123 return revlog.revlog.revision(self, nodeorrev)
124 124
125 125 def baserevdiff(self, rev1, rev2):
126 126 # Exists for the same purpose as baserevision.
127 127 return revlog.revlog.revdiff(self, rev1, rev2)
128 128
129 129 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
130 130 raise NotImplementedError
131 131 def addgroup(self, revs, linkmapper, transaction):
132 132 raise NotImplementedError
133 133 def strip(self, rev, minlink):
134 134 raise NotImplementedError
135 135 def checksize(self):
136 136 raise NotImplementedError
137 137
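The linkmapper argument ties the two halves together: for the union changelog it is None (revisions link to themselves), while for manifests and filelogs it is unionrepository._clrev below, which maps a linkrev in repo2's changelog numbering to the corresponding rev in the union changelog. A toy illustration of that contract, with made-up node ids and positions:

    # Toy linkmapper: repo2 changelog revs -> union changelog revs,
    # resolved through the shared node ids (values illustrative).
    repo2nodes = ['n0', 'n1', 'n2']             # repo2 cl, by rev
    unionrev = {'n0': 0, 'n1': 5, 'n2': 6}      # union cl positions

    def clrev(rev2):
        return unionrev[repo2nodes[rev2]]

    assert clrev(1) == 5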
138 138 class unionchangelog(unionrevlog, changelog.changelog):
139 139 def __init__(self, opener, opener2):
140 140 changelog.changelog.__init__(self, opener)
141 141 linkmapper = None
142 142 changelog2 = changelog.changelog(opener2)
143 143 unionrevlog.__init__(self, opener, self.indexfile, changelog2,
144 144 linkmapper)
145 145
146 146 def baserevision(self, nodeorrev):
147 147 # Although changelog doesn't override the 'revision' method, some
148 148 # extensions may replace this class with another that does. Same story
149 149 # with the manifest and filelog classes.
150 150 return changelog.changelog.revision(self, nodeorrev)
151 151
152 152 def baserevdiff(self, rev1, rev2):
153 153 return changelog.changelog.revdiff(self, rev1, rev2)
154 154
155 155 class unionmanifest(unionrevlog, manifest.manifest):
156 156 def __init__(self, opener, opener2, linkmapper):
157 157 manifest.manifest.__init__(self, opener)
158 158 manifest2 = manifest.manifest(opener2)
159 159 unionrevlog.__init__(self, opener, self.indexfile, manifest2,
160 160 linkmapper)
161 161
162 162 def baserevision(self, nodeorrev):
163 163 return manifest.manifest.revision(self, nodeorrev)
164 164
165 165 def baserevdiff(self, rev1, rev2):
166 166 return manifest.manifest.revdiff(self, rev1, rev2)
167 167
168 168 class unionfilelog(unionrevlog, filelog.filelog):
169 169 def __init__(self, opener, path, opener2, linkmapper, repo):
170 170 filelog.filelog.__init__(self, opener, path)
171 171 filelog2 = filelog.filelog(opener2, path)
172 172 unionrevlog.__init__(self, opener, self.indexfile, filelog2,
173 173 linkmapper)
174 174 self._repo = repo
175 175
176 176 def baserevision(self, nodeorrev):
177 177 return filelog.filelog.revision(self, nodeorrev)
178 178
179 179 def baserevdiff(self, rev1, rev2):
180 180 return filelog.filelog.revdiff(self, rev1, rev2)
181 181
182 182 def iscensored(self, rev):
183 183 """Check if a revision is censored."""
184 184 if rev <= self.repotiprev:
185 185 return filelog.filelog.iscensored(self, rev)
186 186 node = self.node(rev)
187 187 return self.revlog2.iscensored(self.revlog2.rev(node))
188 188
189 189 class unionpeer(localrepo.localpeer):
190 190 def canpush(self):
191 191 return False
192 192
193 193 class unionrepository(localrepo.localrepository):
194 194 def __init__(self, ui, path, path2):
195 195 localrepo.localrepository.__init__(self, ui, path)
196 196 self.ui.setconfig('phases', 'publish', False, 'unionrepo')
197 197
198 198 self._url = 'union:%s+%s' % (util.expandpath(path),
199 199 util.expandpath(path2))
200 200 self.repo2 = localrepo.localrepository(ui, path2)
201 201
202 202 @localrepo.unfilteredpropertycache
203 203 def changelog(self):
204 204 return unionchangelog(self.svfs, self.repo2.svfs)
205 205
206 206 def _clrev(self, rev2):
207 207 """map from repo2 changelog rev to temporary rev in self.changelog"""
208 208 node = self.repo2.changelog.node(rev2)
209 209 return self.changelog.rev(node)
210 210
211 @localrepo.unfilteredpropertycache
212 def manifest(self):
211 def _constructmanifest(self):
213 212 return unionmanifest(self.svfs, self.repo2.svfs,
214 213 self.unfiltered()._clrev)
215 214
216 215 def url(self):
217 216 return self._url
218 217
219 218 def file(self, f):
220 219 return unionfilelog(self.svfs, f, self.repo2.svfs,
221 220 self.unfiltered()._clrev, self)
222 221
223 222 def close(self):
224 223 self.repo2.close()
225 224
226 225 def cancopy(self):
227 226 return False
228 227
229 228 def peer(self):
230 229 return unionpeer(self)
231 230
232 231 def getcwd(self):
233 232 return os.getcwd() # always outside the repo
234 233
235 234 def instance(ui, path, create):
236 235 if create:
237 236 raise error.Abort(_('cannot create new union repository'))
238 237 parentpath = ui.config("bundle", "mainreporoot", "")
239 238 if not parentpath:
240 239 # try to find the correct path to the working directory repo
241 240 parentpath = cmdutil.findrepo(os.getcwd())
242 241 if parentpath is None:
243 242 parentpath = ''
244 243 if parentpath:
245 244 # Try to make the full path relative so we get a nice, short URL.
246 245 # In particular, we don't want temp dir names in test outputs.
247 246 cwd = os.getcwd()
248 247 if parentpath == cwd:
249 248 parentpath = ''
250 249 else:
251 250 cwd = pathutil.normasprefix(cwd)
252 251 if parentpath.startswith(cwd):
253 252 parentpath = parentpath[len(cwd):]
254 253 if path.startswith('union:'):
255 254 s = path.split(":", 1)[1].split("+", 1)
256 255 if len(s) == 1:
257 256 repopath, repopath2 = parentpath, s[0]
258 257 else:
259 258 repopath, repopath2 = s
260 259 else:
261 260 repopath, repopath2 = parentpath, path
262 261 return unionrepository(ui, repopath, repopath2)
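So a union repository can be opened either as an explicit 'union:repo1+repo2' URL or with a single path plus an inferred parent repo; e.g. 'hg -R union:repo1+repo2 log' should work from the command line. The URL handling above reduces to a simple split (paths illustrative):

    # How instance() splits a 'union:' URL.
    path = 'union:repo1+repo2'
    s = path.split(':', 1)[1].split('+', 1)     # ['repo1', 'repo2']
    repopath, repopath2 = s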