##// END OF EJS Templates
tag: use an abort hint
Matt Mackall -
r22680:8c65cc0f default
parent child Browse files
Show More
@@ -1,1784 +1,1784 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # Every access is redirected to the unfiltered repo so that all
    # filtered views share one cached value per tracked file.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve fname relative to .hg/store instead of .hg/
        return obj.sjoin(fname)
39 39
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # compute and cache on the unfiltered repo itself
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: delegate to the (possibly cached) unfiltered value
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # cache on the concrete object (each filtered view caches its own)
        object.__setattr__(obj, self.name, value)
54 54
55 55
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
59 59
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the receiver for its unfiltered view before delegating
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
65 65
# Capabilities advertised by a localpeer; legacy peers additionally
# support the old 'changegroupsubset' command.
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # expose the 'served' view so filtered changesets stay hidden
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            # surface push races to the caller as a protocol-level error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    # the following pre-getbundle commands simply delegate to the repo

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
    # on-disk store formats this implementation can read and write
    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements mirrored into the store opener options
    openerreqs = set(('revlogv1', 'generaldelta'))
    # default requirements for newly created repositories
    requirements = ['revlogv1']
    # name of the repoview filter applied to this repo (None = unfiltered)
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # return a copy so callers can append without mutating the class list
        return self.requirements[:]
189 189
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, create) the repository at path."""
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # missing .hg/hgrc is fine; run with the base configuration
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only let enabled extensions extend the supported set
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # pre-requirements-era repository
                requirements = set()

        self.sharedpath = self.path
        try:
            # a 'shared' repo stores its history in another repo's .hg
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
299 299
    def close(self):
        # nothing to release for a plain local repository
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        """Record requirements and derive the store opener options."""
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        # persist self.requirements into .hg/requires, one per line
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()
325 325
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is the subrepo itself: legal
                    return True
                else:
                    # path is inside a subrepo: let that repo decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk up one directory level and retry
                parts.pop()
        return False
363 363
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark name -> node mapping, backed by .hg/bookmarks
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark, backed by .hg/bookmarks.current
        return bookmarks.readcurrent(self)
388 388
389 389 def bookmarkheads(self, bookmark):
390 390 name = bookmark.split('@', 1)[0]
391 391 heads = []
392 392 for mark, n in self._bookmarks.iteritems():
393 393 if mark.split('@', 1)[0] == name:
394 394 heads.append(n)
395 395 return heads
396 396
    @storecache('phaseroots')
    def _phasecache(self):
        # phase information, backed by .hg/store/phaseroots
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a transaction is in progress in this root: also read the
            # not-yet-committed revisions from 00changelog.i.a
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        # warn at most once about unknown working directory parents
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
438 438
    def __getitem__(self, changeid):
        # None selects the working directory context
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
484 484
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        """Low-level tagging: write names for node and fire tag hooks.

        Local tags go to .hg/localtags; global tags are appended to
        .hgtags and committed.  Returns the tagging changeset's node for
        global tags, None for local ones.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines, recording the old node first when a tag
            # of a known type is being moved
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
554 554
555 555 def tag(self, names, node, message, local, user, date, editor=False):
556 556 '''tag a revision with one or more symbolic names.
557 557
558 558 names is a list of strings or, when adding a single tag, names may be a
559 559 string.
560 560
561 561 if local is True, the tags are stored in a per-repository file.
562 562 otherwise, they are stored in the .hgtags file, and a new
563 563 changeset is committed with the change.
564 564
565 565 keyword arguments:
566 566
567 567 local: whether to store tags in non-version-controlled file
568 568 (default False)
569 569
570 570 message: commit message to use if committing
571 571
572 572 user: name of user to use if committing
573 573
574 574 date: date tuple to use if committing'''
575 575
576 576 if not local:
577 577 m = matchmod.exact(self.root, '', ['.hgtags'])
578 578 if util.any(self.status(match=m, unknown=True, ignored=True)):
579 raise util.Abort(_('working copy of .hgtags is changed '
580 '(please commit .hgtags manually)'))
579 raise util.Abort(_('working copy of .hgtags is changed'),
580 hint=_('please commit .hgtags manually'))
581 581
582 582 self.tags() # instantiate the cache
583 583 self._tag(names, node, message, local, user, date, editor=editor)
584 584
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
607 607
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # filtered view: recompute rather than trust the shared cache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
656 656
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            # sort (rev, tag, node) triples, then drop the rev
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # invert the tag -> node map, sorting each node's tag list
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
688 688
689 689 def nodebookmarks(self, node):
690 690 marks = []
691 691 for bookmark, n in self._bookmarks.iteritems():
692 692 if n == node:
693 693 marks.append(bookmark)
694 694 return sorted(marks)
695 695
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        # resolve any changeset identifier to its binary node
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        # key is not a branch name: treat it as a changeset identifier
        # and return the branch of that changeset
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
719 719
    def known(self, nodes):
        """Map each node to True if present here and not secret."""
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f, *insidef):
        # build a path inside .hg/
        return os.path.join(self.path, f, *insidef)

    def wjoin(self, f, *insidef):
        # build a path inside the working directory
        return os.path.join(self.root, f, *insidef)
747 747
    def file(self, f):
        # return the filelog for tracked file f (leading '/' stripped)
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)

    def _link(self, f):
        # is f a symlink in the working directory?
        return self.wvfs.islink(f)
793 793
    def _loadfilter(self, filter):
        """Build (and cache) the list of (matcher, fn, params) filters
        configured in the given config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # no registered data filter: shell out through util.filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        # apply the first filter whose pattern matches filename
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
826 826
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # 'l' flag: materialize the data as a symlink target
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                # 'x' flag: mark the file executable
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
856 856
    def transaction(self, desc, report=None):
        """Return a (possibly nested) transaction for the store."""
        # reuse a running transaction by nesting into it
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        # weakref so an abandoned transaction can be garbage collected
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        # (vfs, name) pairs of every file snapshotted for rollback
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        # snapshot non-store state so rollback can restore it
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
904 904
905 905 def recover(self):
906 906 lock = self.lock()
907 907 try:
908 908 if self.svfs.exists("journal"):
909 909 self.ui.status(_("rolling back interrupted transaction\n"))
910 910 transaction.rollback(self.sopener, "journal",
911 911 self.ui.warn)
912 912 self.invalidate()
913 913 return True
914 914 else:
915 915 self.ui.warn(_("no interrupted transaction available\n"))
916 916 return False
917 917 finally:
918 918 lock.release()
919 919
920 920 def rollback(self, dryrun=False, force=False):
921 921 wlock = lock = None
922 922 try:
923 923 wlock = self.wlock()
924 924 lock = self.lock()
925 925 if self.svfs.exists("undo"):
926 926 return self._rollback(dryrun, force)
927 927 else:
928 928 self.ui.warn(_("no rollback information available\n"))
929 929 return 1
930 930 finally:
931 931 release(lock, wlock)
932 932
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Undo the last transaction recorded in the undo.* files.

        Restores the store from the undo journal and, when a working
        directory parent was stripped, the dirstate/branch as well.
        Refuses (without force) to roll back a commit the working
        directory is not checked out on.  Returns 0.
        """
        ui = self.ui
        try:
            # undo.desc records "<old revision count>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no (readable) undo.desc: old-style or damaged undo data
            msg = _('rolling back unknown transaction\n')
            desc = None

        # rolling back a commit we are not checked out on could strand
        # the committed changes
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # restore dirstate/branch only if a working-directory parent was
        # actually removed by the rollback
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
998 998
999 999 def invalidatecaches(self):
1000 1000
1001 1001 if '_tagscache' in vars(self):
1002 1002 # can't use delattr on proxy
1003 1003 del self.__dict__['_tagscache']
1004 1004
1005 1005 self.unfiltered()._branchcaches.clear()
1006 1006 self.invalidatevolatilesets()
1007 1007
    def invalidatevolatilesets(self):
        # Forget cached filtered-revision sets and obsolescence caches;
        # they are recomputed lazily on next access.
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1011 1011
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the dirstate's own filecache entries first, then drop
            # the cached dirstate object itself (from the unfiltered repo,
            # where all filecaches live)
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1028 1028
1029 1029 def invalidate(self):
1030 1030 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1031 1031 for k in self._filecache:
1032 1032 # dirstate is invalidated separately in invalidatedirstate()
1033 1033 if k == 'dirstate':
1034 1034 continue
1035 1035
1036 1036 try:
1037 1037 delattr(unfiltered, k)
1038 1038 except AttributeError:
1039 1039 pass
1040 1040 self.invalidatecaches()
1041 1041 self.store.invalidatecaches()
1042 1042
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1049 1049
1050 1050 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1051 1051 try:
1052 1052 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1053 1053 except error.LockHeld, inst:
1054 1054 if not wait:
1055 1055 raise
1056 1056 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1057 1057 (desc, inst.locker))
1058 1058 # default to 600 seconds timeout
1059 1059 l = lockmod.lock(vfs, lockname,
1060 1060 int(self.ui.config("ui", "timeout", "600")),
1061 1061 releasefn, desc=desc)
1062 1062 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1063 1063 if acquirefn:
1064 1064 acquirefn()
1065 1065 return l
1066 1066
1067 1067 def _afterlock(self, callback):
1068 1068 """add a callback to the current repository lock.
1069 1069
1070 1070 The callback will be executed on lock release."""
1071 1071 l = self._lockref and self._lockref()
1072 1072 if l:
1073 1073 l.postrelease.append(callback)
1074 1074 else:
1075 1075 callback()
1076 1076
1077 1077 def lock(self, wait=True):
1078 1078 '''Lock the repository store (.hg/store) and return a weak reference
1079 1079 to the lock. Use this before modifying the store (e.g. committing or
1080 1080 stripping). If you are opening a transaction, get a lock as well.)'''
1081 1081 l = self._lockref and self._lockref()
1082 1082 if l is not None and l.held:
1083 1083 l.lock()
1084 1084 return l
1085 1085
1086 1086 def unlock():
1087 1087 for k, ce in self._filecache.items():
1088 1088 if k == 'dirstate' or k not in self.__dict__:
1089 1089 continue
1090 1090 ce.refresh()
1091 1091
1092 1092 l = self._lock(self.svfs, "lock", wait, unlock,
1093 1093 self.invalidate, _('repository %s') % self.origroot)
1094 1094 self._lockref = weakref.ref(l)
1095 1095 return l
1096 1096
1097 1097 def wlock(self, wait=True):
1098 1098 '''Lock the non-store parts of the repository (everything under
1099 1099 .hg except .hg/store) and return a weak reference to the lock.
1100 1100 Use this before modifying files in .hg.'''
1101 1101 l = self._wlockref and self._wlockref()
1102 1102 if l is not None and l.held:
1103 1103 l.lock()
1104 1104 return l
1105 1105
1106 1106 def unlock():
1107 1107 if self.dirstate.pendingparentchange():
1108 1108 self.dirstate.invalidate()
1109 1109 else:
1110 1110 self.dirstate.write()
1111 1111
1112 1112 self._filecache['dirstate'].refresh()
1113 1113
1114 1114 l = self._lock(self.vfs, "wlock", wait, unlock,
1115 1115 self.invalidatedirstate, _('working directory of %s') %
1116 1116 self.origroot)
1117 1117 self._wlockref = weakref.ref(l)
1118 1118 return l
1119 1119
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: filectx to commit; manifest1/manifest2: parent manifests;
        linkrev: changelog revision the new filelog entry will link to;
        tr: running transaction; changelist: list the filename is
        appended to when the file (or its merge-relevant flags) changed.

        Returns the new filelog node, or the reused first-parent node
        when the file content is unchanged.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # file is new on this side: adopt the other parent as p1
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1200 1200
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing to
        commit.  Raises util.Abort for invalid requests (partial merge
        commit, unresolved conflicts, dirty subrepos, unmatched file
        patterns, ...).
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # collect explicitly-named directories and abort on bad files
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            # nothing changed (and no branch change / explicit close):
            # report "no commit" by returning None
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # the commit hook fires once the lock is released
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1367 1367
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With error=True, IOErrors raised while committing individual
        files are reported and re-raised (used when the working context
        is known to be in a partially-failed state).  Returns the new
        changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # file vanished from the context: treat as removed
                            removed.append(f)
                        else:
                            new[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                      trp, changed)
                            m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file-level changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1450 1450
1451 1451 @unfilteredmethod
1452 1452 def destroying(self):
1453 1453 '''Inform the repository that nodes are about to be destroyed.
1454 1454 Intended for use by strip and rollback, so there's a common
1455 1455 place for anything that has to be done before destroying history.
1456 1456
1457 1457 This is mostly useful for saving state that is in memory and waiting
1458 1458 to be flushed when the current lock is released. Because a call to
1459 1459 destroyed is imminent, the repo will be invalidated causing those
1460 1460 changes to stay in memory (waiting for the next unlock), or vanish
1461 1461 completely.
1462 1462 '''
1463 1463 # When using the same lock to commit and strip, the phasecache is left
1464 1464 # dirty after committing. Then when we strip, the repo is invalidated,
1465 1465 # causing those changes to disappear.
1466 1466 if '_phasecache' in vars(self):
1467 1467 self._phasecache.write()
1468 1468
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        Cleans up the phasecache and the 'served' branch cache, then
        fully invalidates the repository caches.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1502 1502
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        node=None walks the working directory.
        '''
        return self[node].walk(match)
1510 1510
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)

        The boolean flags select which extra status categories to
        compute; see context.status for the result format.'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
1517 1517
1518 1518 def heads(self, start=None):
1519 1519 heads = self.changelog.heads(start)
1520 1520 # sort the output in rev descending order
1521 1521 return sorted(heads, key=self.changelog.rev, reverse=True)
1522 1522
1523 1523 def branchheads(self, branch=None, start=None, closed=False):
1524 1524 '''return a (possibly filtered) list of heads for the given branch
1525 1525
1526 1526 Heads are returned in topological order, from newest to oldest.
1527 1527 If branch is None, use the dirstate branch.
1528 1528 If start is not None, return only heads reachable from start.
1529 1529 If closed is True, return heads that are marked as closed as well.
1530 1530 '''
1531 1531 if branch is None:
1532 1532 branch = self[None].branch()
1533 1533 branches = self.branchmap()
1534 1534 if branch not in branches:
1535 1535 return []
1536 1536 # the cache returns heads ordered lowest to highest
1537 1537 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1538 1538 if start is not None:
1539 1539 # filter out the heads that cannot be reached from startrev
1540 1540 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1541 1541 bheads = [h for h in bheads if h in fbheads]
1542 1542 return bheads
1543 1543
1544 1544 def branches(self, nodes):
1545 1545 if not nodes:
1546 1546 nodes = [self.changelog.tip()]
1547 1547 b = []
1548 1548 for n in nodes:
1549 1549 t = n
1550 1550 while True:
1551 1551 p = self.changelog.parents(n)
1552 1552 if p[1] != nullid or p[0] == nullid:
1553 1553 b.append((t, n, p[0], p[1]))
1554 1554 break
1555 1555 n = p[0]
1556 1556 return b
1557 1557
1558 1558 def between(self, pairs):
1559 1559 r = []
1560 1560
1561 1561 for top, bottom in pairs:
1562 1562 n, l, i = top, [], 0
1563 1563 f = 1
1564 1564
1565 1565 while n != bottom and n != nullid:
1566 1566 p = self.changelog.parents(n)[0]
1567 1567 if i == f:
1568 1568 l.append(n)
1569 1569 f = f * 2
1570 1570 n = p
1571 1571 i += 1
1572 1572
1573 1573 r.append(l)
1574 1574
1575 1575 return r
1576 1576
    def pull(self, remote, heads=None, force=False, **kwargs):
        # thin wrapper: the pull logic lives in exchange.pull()
        return exchange.pull(self, remote, heads, force, **kwargs)
1579 1579
1580 1580 def checkpush(self, pushop):
1581 1581 """Extensions can override this function if additional checks have
1582 1582 to be performed before pushing, or call it if they override push
1583 1583 command.
1584 1584 """
1585 1585 pass
1586 1586
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.

        Cached on the unfiltered repo (see unfilteredpropertycache);
        extensions register callables on the returned hooks object.
        """
        return util.hooks()
1593 1593
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from *remote*.

        requirements is the set of format requirements implied by the
        stream; it is merged with our non-format requirements and written
        out.  Returns len(heads) + 1, mirroring pull()'s convention.
        Raises util.Abort on server refusal and error.ResponseError on
        malformed protocol data.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            # first line: a status code (0 = ok)
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # second line: "<file count> <total bytes>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    # per-file header: "<name>\0<size>"
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against a zero/negative clock delta
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1696 1696
1697 1697 def clone(self, remote, heads=[], stream=False):
1698 1698 '''clone remote repository.
1699 1699
1700 1700 keyword arguments:
1701 1701 heads: list of revs to clone (forces use of pull)
1702 1702 stream: use streaming clone if possible'''
1703 1703
1704 1704 # now, all clients that can request uncompressed clones can
1705 1705 # read repo formats supported by all servers that can serve
1706 1706 # them.
1707 1707
1708 1708 # if revlog format changes, client will have to check version
1709 1709 # and format flags on "stream" capability, and use
1710 1710 # uncompressed only if compatible.
1711 1711
1712 1712 if not stream:
1713 1713 # if the server explicitly prefers to stream (for fast LANs)
1714 1714 stream = remote.capable('stream-preferred')
1715 1715
1716 1716 if stream and not heads:
1717 1717 # 'stream' means remote revlog format is revlogv1 only
1718 1718 if remote.capable('stream'):
1719 1719 return self.stream_in(remote, set(('revlogv1',)))
1720 1720 # otherwise, 'streamreqs' contains the remote revlog format
1721 1721 streamreqs = remote.capable('streamreqs')
1722 1722 if streamreqs:
1723 1723 streamreqs = set(streamreqs.split(','))
1724 1724 # if we support it, stream in and adjust our requirements
1725 1725 if not streamreqs - self.supportedformats:
1726 1726 return self.stream_in(remote, streamreqs)
1727 1727
1728 1728 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1729 1729 try:
1730 1730 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1731 1731 ret = self.pull(remote, heads)
1732 1732 finally:
1733 1733 self.ui.restoreconfig(quiet)
1734 1734 return ret
1735 1735
    def pushkey(self, namespace, key, old, new):
        """Set *key* to *new* (expecting current value *old*) in the
        pushkey *namespace*, firing the prepushkey hook before and the
        pushkey hook after the update.  Returns the pushkey.push result.
        """
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
1744 1744
    def listkeys(self, namespace):
        """Return the key/value dict of the given pushkey *namespace*,
        firing the prelistkeys hook before and listkeys hook after."""
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
1751 1751
1752 1752 def debugwireargs(self, one, two, three=None, four=None, five=None):
1753 1753 '''used to test argument passing over the wire'''
1754 1754 return "%s %s %s %s %s" % (one, two, three, four, five)
1755 1755
    def savecommitmessage(self, text):
        """Persist *text* to .hg/last-message.txt (so an edited message
        survives a rolled-back transaction) and return its path relative
        to the repository root."""
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
1763 1763
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) in *files*,
    silently skipping entries whose journal file does not exist yet."""
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
1774 1774
def undoname(fn):
    """Map a journal file path to its undo counterpart
    (e.g. 'journal.dirstate' -> 'undo.dirstate')."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1779 1779
def instance(ui, path, create):
    # repository factory: open (or create) a local repository at *path*,
    # which may be a plain path or a file:// URL
    return localrepository(ui, util.urllocalpath(path), create)
1782 1782
def islocal(path):
    """localrepo paths are local by definition."""
    return True
@@ -1,605 +1,606 b''
1 1 $ hg init test
2 2 $ cd test
3 3
4 4 $ echo a > a
5 5 $ hg add a
6 6 $ hg commit -m "test"
7 7 $ hg history
8 8 changeset: 0:acb14030fe0a
9 9 tag: tip
10 10 user: test
11 11 date: Thu Jan 01 00:00:00 1970 +0000
12 12 summary: test
13 13
14 14
15 15 $ hg tag ' '
16 16 abort: tag names cannot consist entirely of whitespace
17 17 [255]
18 18
19 19 (this tests also that editor is not invoked, if '--edit' is not
20 20 specified)
21 21
22 22 $ HGEDITOR=cat hg tag "bleah"
23 23 $ hg history
24 24 changeset: 1:d4f0d2909abc
25 25 tag: tip
26 26 user: test
27 27 date: Thu Jan 01 00:00:00 1970 +0000
28 28 summary: Added tag bleah for changeset acb14030fe0a
29 29
30 30 changeset: 0:acb14030fe0a
31 31 tag: bleah
32 32 user: test
33 33 date: Thu Jan 01 00:00:00 1970 +0000
34 34 summary: test
35 35
36 36
37 37 $ echo foo >> .hgtags
38 38 $ hg tag "bleah2"
39 abort: working copy of .hgtags is changed (please commit .hgtags manually)
39 abort: working copy of .hgtags is changed
40 (please commit .hgtags manually)
40 41 [255]
41 42
42 43 $ hg revert .hgtags
43 44 $ hg tag -r 0 x y z y y z
44 45 abort: tag names must be unique
45 46 [255]
46 47 $ hg tag tap nada dot tip
47 48 abort: the name 'tip' is reserved
48 49 [255]
49 50 $ hg tag .
50 51 abort: the name '.' is reserved
51 52 [255]
52 53 $ hg tag null
53 54 abort: the name 'null' is reserved
54 55 [255]
55 56 $ hg tag "bleah"
56 57 abort: tag 'bleah' already exists (use -f to force)
57 58 [255]
58 59 $ hg tag "blecch" "bleah"
59 60 abort: tag 'bleah' already exists (use -f to force)
60 61 [255]
61 62
62 63 $ hg tag --remove "blecch"
63 64 abort: tag 'blecch' does not exist
64 65 [255]
65 66 $ hg tag --remove "bleah" "blecch" "blough"
66 67 abort: tag 'blecch' does not exist
67 68 [255]
68 69
69 70 $ hg tag -r 0 "bleah0"
70 71 $ hg tag -l -r 1 "bleah1"
71 72 $ hg tag gack gawk gorp
72 73 $ hg tag -f gack
73 74 $ hg tag --remove gack gorp
74 75
75 76 $ hg tag "bleah "
76 77 abort: tag 'bleah' already exists (use -f to force)
77 78 [255]
78 79 $ hg tag " bleah"
79 80 abort: tag 'bleah' already exists (use -f to force)
80 81 [255]
81 82 $ hg tag " bleah"
82 83 abort: tag 'bleah' already exists (use -f to force)
83 84 [255]
84 85 $ hg tag -r 0 " bleahbleah "
85 86 $ hg tag -r 0 " bleah bleah "
86 87
87 88 $ cat .hgtags
88 89 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
89 90 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
90 91 336fccc858a4eb69609a291105009e484a6b6b8d gack
91 92 336fccc858a4eb69609a291105009e484a6b6b8d gawk
92 93 336fccc858a4eb69609a291105009e484a6b6b8d gorp
93 94 336fccc858a4eb69609a291105009e484a6b6b8d gack
94 95 799667b6f2d9b957f73fa644a918c2df22bab58f gack
95 96 799667b6f2d9b957f73fa644a918c2df22bab58f gack
96 97 0000000000000000000000000000000000000000 gack
97 98 336fccc858a4eb69609a291105009e484a6b6b8d gorp
98 99 0000000000000000000000000000000000000000 gorp
99 100 acb14030fe0a21b60322c440ad2d20cf7685a376 bleahbleah
100 101 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah bleah
101 102
102 103 $ cat .hg/localtags
103 104 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
104 105
105 106 tagging on a non-head revision
106 107
107 108 $ hg update 0
108 109 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
109 110 $ hg tag -l localblah
110 111 $ hg tag "foobar"
111 112 abort: not at a branch head (use -f to force)
112 113 [255]
113 114 $ hg tag -f "foobar"
114 115 $ cat .hgtags
115 116 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
116 117 $ cat .hg/localtags
117 118 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
118 119 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
119 120
120 121 $ hg tag -l 'xx
121 122 > newline'
122 123 abort: '\n' cannot be used in a name
123 124 [255]
124 125 $ hg tag -l 'xx:xx'
125 126 abort: ':' cannot be used in a name
126 127 [255]
127 128
128 129 cloning local tags
129 130
130 131 $ cd ..
131 132 $ hg -R test log -r0:5
132 133 changeset: 0:acb14030fe0a
133 134 tag: bleah
134 135 tag: bleah bleah
135 136 tag: bleah0
136 137 tag: bleahbleah
137 138 tag: foobar
138 139 tag: localblah
139 140 user: test
140 141 date: Thu Jan 01 00:00:00 1970 +0000
141 142 summary: test
142 143
143 144 changeset: 1:d4f0d2909abc
144 145 tag: bleah1
145 146 user: test
146 147 date: Thu Jan 01 00:00:00 1970 +0000
147 148 summary: Added tag bleah for changeset acb14030fe0a
148 149
149 150 changeset: 2:336fccc858a4
150 151 tag: gawk
151 152 user: test
152 153 date: Thu Jan 01 00:00:00 1970 +0000
153 154 summary: Added tag bleah0 for changeset acb14030fe0a
154 155
155 156 changeset: 3:799667b6f2d9
156 157 user: test
157 158 date: Thu Jan 01 00:00:00 1970 +0000
158 159 summary: Added tag gack, gawk, gorp for changeset 336fccc858a4
159 160
160 161 changeset: 4:154eeb7c0138
161 162 user: test
162 163 date: Thu Jan 01 00:00:00 1970 +0000
163 164 summary: Added tag gack for changeset 799667b6f2d9
164 165
165 166 changeset: 5:b4bb47aaff09
166 167 user: test
167 168 date: Thu Jan 01 00:00:00 1970 +0000
168 169 summary: Removed tag gack, gorp
169 170
170 171 $ hg clone -q -rbleah1 test test1
171 172 $ hg -R test1 parents --style=compact
172 173 1[tip] d4f0d2909abc 1970-01-01 00:00 +0000 test
173 174 Added tag bleah for changeset acb14030fe0a
174 175
175 176 $ hg clone -q -r5 test#bleah1 test2
176 177 $ hg -R test2 parents --style=compact
177 178 5[tip] b4bb47aaff09 1970-01-01 00:00 +0000 test
178 179 Removed tag gack, gorp
179 180
180 181 $ hg clone -q -U test#bleah1 test3
181 182 $ hg -R test3 parents --style=compact
182 183
183 184 $ cd test
184 185
185 186 Issue601: hg tag doesn't do the right thing if .hgtags or localtags
186 187 doesn't end with EOL
187 188
188 189 $ python << EOF
189 190 > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
190 191 > f = file('.hg/localtags', 'w'); f.write(last); f.close()
191 192 > EOF
192 193 $ cat .hg/localtags; echo
193 194 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
194 195 $ hg tag -l localnewline
195 196 $ cat .hg/localtags; echo
196 197 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
197 198 c2899151f4e76890c602a2597a650a72666681bf localnewline
198 199
199 200
200 201 $ python << EOF
201 202 > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
202 203 > f = file('.hgtags', 'w'); f.write(last); f.close()
203 204 > EOF
204 205 $ hg ci -m'broken manual edit of .hgtags'
205 206 $ cat .hgtags; echo
206 207 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
207 208 $ hg tag newline
208 209 $ cat .hgtags; echo
209 210 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
210 211 a0eea09de1eeec777b46f2085260a373b2fbc293 newline
211 212
212 213
213 214 tag and branch using same name
214 215
215 216 $ hg branch tag-and-branch-same-name
216 217 marked working directory as branch tag-and-branch-same-name
217 218 (branches are permanent and global, did you want a bookmark?)
218 219 $ hg ci -m"discouraged"
219 220 $ hg tag tag-and-branch-same-name
220 221 warning: tag tag-and-branch-same-name conflicts with existing branch name
221 222
222 223 test custom commit messages
223 224
224 225 $ cat > editor.sh << '__EOF__'
225 226 > echo "==== before editing"
226 227 > cat "$1"
227 228 > echo "===="
228 229 > echo "custom tag message" > "$1"
229 230 > echo "second line" >> "$1"
230 231 > __EOF__
231 232
232 233 at first, test saving last-message.txt
233 234
234 235 (test that editor is not invoked before transaction starting)
235 236
236 237 $ cat > .hg/hgrc << '__EOF__'
237 238 > [hooks]
238 239 > # this failure occurs before editor invocation
239 240 > pretag.test-saving-lastmessage = false
240 241 > __EOF__
241 242 $ rm -f .hg/last-message.txt
242 243 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
243 244 abort: pretag.test-saving-lastmessage hook exited with status 1
244 245 [255]
245 246 $ test -f .hg/last-message.txt
246 247 [1]
247 248
248 249 (test that editor is invoked and commit message is saved into
249 250 "last-message.txt")
250 251
251 252 $ cat >> .hg/hgrc << '__EOF__'
252 253 > [hooks]
253 254 > pretag.test-saving-lastmessage =
254 255 > # this failure occurs after editor invocation
255 256 > pretxncommit.unexpectedabort = false
256 257 > __EOF__
257 258
258 259 (this tests also that editor is invoked, if '--edit' is specified,
259 260 regardless of '--message')
260 261
261 262 $ rm -f .hg/last-message.txt
262 263 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e -m "foo bar"
263 264 ==== before editing
264 265 foo bar
265 266
266 267
267 268 HG: Enter commit message. Lines beginning with 'HG:' are removed.
268 269 HG: Leave message empty to abort commit.
269 270 HG: --
270 271 HG: user: test
271 272 HG: branch 'tag-and-branch-same-name'
272 273 HG: changed .hgtags
273 274 ====
274 275 transaction abort!
275 276 rollback completed
276 277 note: commit message saved in .hg/last-message.txt
277 278 abort: pretxncommit.unexpectedabort hook exited with status 1
278 279 [255]
279 280 $ cat .hg/last-message.txt
280 281 custom tag message
281 282 second line
282 283
283 284 $ cat >> .hg/hgrc << '__EOF__'
284 285 > [hooks]
285 286 > pretxncommit.unexpectedabort =
286 287 > __EOF__
287 288 $ hg status .hgtags
288 289 M .hgtags
289 290 $ hg revert --no-backup -q .hgtags
290 291
291 292 then, test custom commit message itself
292 293
293 294 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
294 295 ==== before editing
295 296 Added tag custom-tag for changeset 75a534207be6
296 297
297 298
298 299 HG: Enter commit message. Lines beginning with 'HG:' are removed.
299 300 HG: Leave message empty to abort commit.
300 301 HG: --
301 302 HG: user: test
302 303 HG: branch 'tag-and-branch-same-name'
303 304 HG: changed .hgtags
304 305 ====
305 306 $ hg log -l1 --template "{desc}\n"
306 307 custom tag message
307 308 second line
308 309
309 310
310 311 local tag with .hgtags modified
311 312
312 313 $ hg tag hgtags-modified
313 314 $ hg rollback
314 315 repository tip rolled back to revision 13 (undo commit)
315 316 working directory now based on revision 13
316 317 $ hg st
317 318 M .hgtags
318 319 ? .hgtags.orig
319 320 ? editor.sh
320 321 $ hg tag --local baz
321 322 $ hg revert --no-backup .hgtags
322 323
323 324
324 325 tagging when at named-branch-head that's not a topo-head
325 326
326 327 $ hg up default
327 328 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
328 329 $ hg merge -t internal:local
329 330 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
330 331 (branch merge, don't forget to commit)
331 332 $ hg ci -m 'merge named branch'
332 333 $ hg up 13
333 334 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
334 335 $ hg tag new-topo-head
335 336
336 337 tagging on null rev
337 338
338 339 $ hg up null
339 340 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
340 341 $ hg tag nullrev
341 342 abort: not at a branch head (use -f to force)
342 343 [255]
343 344
344 345 $ hg init empty
345 346 $ hg tag -R empty nullrev
346 347 abort: cannot tag null revision
347 348 [255]
348 349
349 350 $ hg tag -R empty -r 00000000000 -f nulltag
350 351 abort: cannot tag null revision
351 352 [255]
352 353
353 354 $ cd ..
354 355
355 356 tagging on an uncommitted merge (issue2542)
356 357
357 358 $ hg init repo-tag-uncommitted-merge
358 359 $ cd repo-tag-uncommitted-merge
359 360 $ echo c1 > f1
360 361 $ hg ci -Am0
361 362 adding f1
362 363 $ echo c2 > f2
363 364 $ hg ci -Am1
364 365 adding f2
365 366 $ hg co -q 0
366 367 $ hg branch b1
367 368 marked working directory as branch b1
368 369 (branches are permanent and global, did you want a bookmark?)
369 370 $ hg ci -m2
370 371 $ hg up default
371 372 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
372 373 $ hg merge b1
373 374 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
374 375 (branch merge, don't forget to commit)
375 376
376 377 $ hg tag t1
377 378 abort: uncommitted merge
378 379 [255]
379 380 $ hg status
380 381 $ hg tag --rev 1 t2
381 382 abort: uncommitted merge
382 383 [255]
383 384 $ hg tag --rev 1 --local t3
384 385 $ hg tags -v
385 386 tip 2:2a156e8887cc
386 387 t3 1:c3adabd1a5f4 local
387 388
388 389 $ cd ..
389 390
390 391 commit hook on tag used to be run without write lock - issue3344
391 392
392 393 $ hg init repo-tag
393 394 $ touch repo-tag/test
394 395 $ hg -R repo-tag commit -A -m "test"
395 396 adding test
396 397 $ hg init repo-tag-target
397 398 $ hg -R repo-tag --config hooks.commit="\"hg\" push \"`pwd`/repo-tag-target\"" tag tag
398 399 pushing to $TESTTMP/repo-tag-target (glob)
399 400 searching for changes
400 401 adding changesets
401 402 adding manifests
402 403 adding file changes
403 404 added 2 changesets with 2 changes to 2 files
404 405
405 406 automatically merge resolvable tag conflicts (i.e. tags that differ in rank)
406 407 create two clones with some different tags as well as some common tags
407 408 check that we can merge tags that differ in rank
408 409
409 410 $ hg init repo-automatic-tag-merge
410 411 $ cd repo-automatic-tag-merge
411 412 $ echo c0 > f0
412 413 $ hg ci -A -m0
413 414 adding f0
414 415 $ hg tag tbase
415 416 $ cd ..
416 417 $ hg clone repo-automatic-tag-merge repo-automatic-tag-merge-clone
417 418 updating to branch default
418 419 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
419 420 $ cd repo-automatic-tag-merge-clone
420 421 $ echo c1 > f1
421 422 $ hg ci -A -m1
422 423 adding f1
423 424 $ hg tag t1 t2 t3
424 425 $ hg tag --remove t2
425 426 $ hg tag t5
426 427 $ echo c2 > f2
427 428 $ hg ci -A -m2
428 429 adding f2
429 430 $ hg tag -f t3
430 431
431 432 $ cd ../repo-automatic-tag-merge
432 433 $ echo c3 > f3
433 434 $ hg ci -A -m3
434 435 adding f3
435 436 $ hg tag -f t4 t5 t6
436 437 $ hg tag --remove t5
437 438 $ echo c4 > f4
438 439 $ hg ci -A -m4
439 440 adding f4
440 441 $ hg tag t2
441 442 $ hg tag -f t6
442 443
443 444 $ cd ../repo-automatic-tag-merge-clone
444 445 $ hg pull
445 446 pulling from $TESTTMP/repo-automatic-tag-merge (glob)
446 447 searching for changes
447 448 adding changesets
448 449 adding manifests
449 450 adding file changes
450 451 added 6 changesets with 6 changes to 3 files (+1 heads)
451 452 (run 'hg heads' to see heads, 'hg merge' to merge)
452 453 $ hg merge --tool internal:tagmerge
453 454 merging .hgtags
454 455 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
455 456 (branch merge, don't forget to commit)
456 457 $ hg status
457 458 M .hgtags
458 459 M f3
459 460 M f4
460 461 $ hg resolve -l
461 462 R .hgtags
462 463 $ cat .hgtags
463 464 9aa4e1292a27a248f8d07339bed9931d54907be7 t4
464 465 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
465 466 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
466 467 09af2ce14077a94effef208b49a718f4836d4338 t6
467 468 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
468 469 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
469 470 929bca7b18d067cbf3844c3896319a940059d748 t2
470 471 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
471 472 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
472 473 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
473 474 0000000000000000000000000000000000000000 t2
474 475 875517b4806a848f942811a315a5bce30804ae85 t5
475 476 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
476 477 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
477 478 0000000000000000000000000000000000000000 t5
478 479 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
479 480 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
480 481
481 482 check that the merge tried to minimize the diff with the first merge parent
482 483
483 484 $ hg diff --git -r 'p1()' .hgtags
484 485 diff --git a/.hgtags b/.hgtags
485 486 --- a/.hgtags
486 487 +++ b/.hgtags
487 488 @@ -1,9 +1,17 @@
488 489 +9aa4e1292a27a248f8d07339bed9931d54907be7 t4
489 490 +9aa4e1292a27a248f8d07339bed9931d54907be7 t6
490 491 +9aa4e1292a27a248f8d07339bed9931d54907be7 t6
491 492 +09af2ce14077a94effef208b49a718f4836d4338 t6
492 493 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
493 494 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
494 495 +929bca7b18d067cbf3844c3896319a940059d748 t2
495 496 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
496 497 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
497 498 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
498 499 0000000000000000000000000000000000000000 t2
499 500 875517b4806a848f942811a315a5bce30804ae85 t5
500 501 +9aa4e1292a27a248f8d07339bed9931d54907be7 t5
501 502 +9aa4e1292a27a248f8d07339bed9931d54907be7 t5
502 503 +0000000000000000000000000000000000000000 t5
503 504 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
504 505 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
505 506
506 507 detect merge tag conflicts
507 508
508 509 $ hg update -C -r tip
509 510 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
510 511 $ hg tag t7
511 512 $ hg update -C -r 'first(sort(head()))'
512 513 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
513 514 $ printf "%s %s\n" `hg log -r . --template "{node} t7"` >> .hgtags
514 515 $ hg commit -m "manually add conflicting t7 tag"
515 516 $ hg merge --tool internal:tagmerge
516 517 merging .hgtags
517 518 automatic .hgtags merge failed
518 519 the following 1 tags are in conflict: t7
519 520 automatic tag merging of .hgtags failed! (use 'hg resolve --tool internal:merge' or another merge tool of your choice)
520 521 2 files updated, 0 files merged, 0 files removed, 1 files unresolved
521 522 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
522 523 [1]
523 524 $ hg resolve -l
524 525 U .hgtags
525 526 $ cat .hgtags
526 527 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
527 528 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
528 529 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
529 530 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
530 531 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
531 532 0000000000000000000000000000000000000000 t2
532 533 875517b4806a848f942811a315a5bce30804ae85 t5
533 534 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
534 535 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
535 536 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
536 537
537 538 $ cd ..
538 539
539 540 handle the loss of tags
540 541
541 542 $ hg clone repo-automatic-tag-merge-clone repo-merge-lost-tags
542 543 updating to branch default
543 544 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
544 545 $ cd repo-merge-lost-tags
545 546 $ echo c5 > f5
546 547 $ hg ci -A -m5
547 548 adding f5
548 549 $ hg tag -f t7
549 550 $ hg update -r 'p1(t7)'
550 551 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
551 552 $ printf '' > .hgtags
552 553 $ hg commit -m 'delete all tags'
553 554 created new head
554 555 $ hg update -r 'max(t7::)'
555 556 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
556 557 $ hg merge -r tip --tool internal:tagmerge
557 558 merging .hgtags
558 559 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
559 560 (branch merge, don't forget to commit)
560 561 $ hg resolve -l
561 562 R .hgtags
562 563 $ cat .hgtags
563 564 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
564 565 0000000000000000000000000000000000000000 tbase
565 566 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
566 567 0000000000000000000000000000000000000000 t1
567 568 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
568 569 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
569 570 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
570 571 0000000000000000000000000000000000000000 t2
571 572 875517b4806a848f942811a315a5bce30804ae85 t5
572 573 0000000000000000000000000000000000000000 t5
573 574 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
574 575 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
575 576 0000000000000000000000000000000000000000 t3
576 577 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
577 578 0000000000000000000000000000000000000000 t7
578 579 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
579 580 fd3a9e394ce3afb354a496323bf68ac1755a30de t7
580 581
581 582 also check that we minimize the diff with the 1st merge parent
582 583
583 584 $ hg diff --git -r 'p1()' .hgtags
584 585 diff --git a/.hgtags b/.hgtags
585 586 --- a/.hgtags
586 587 +++ b/.hgtags
587 588 @@ -1,12 +1,17 @@
588 589 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
589 590 +0000000000000000000000000000000000000000 tbase
590 591 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
591 592 +0000000000000000000000000000000000000000 t1
592 593 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
593 594 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
594 595 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
595 596 0000000000000000000000000000000000000000 t2
596 597 875517b4806a848f942811a315a5bce30804ae85 t5
597 598 +0000000000000000000000000000000000000000 t5
598 599 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
599 600 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
600 601 +0000000000000000000000000000000000000000 t3
601 602 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
602 603 +0000000000000000000000000000000000000000 t7
603 604 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
604 605 fd3a9e394ce3afb354a496323bf68ac1755a30de t7
605 606
General Comments 0
You need to be logged in to leave comments. Login now