localrepo: filter unknown nodes from the phasecache on destroyed...
Idan Kamara
r18221:082d6929 default
@@ -1,2574 +1,2586 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on a repo is done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that applies to the unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering into account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if a repo has an unfilteredpropertycache value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate a method that always needs to be run on the unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
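# Editor's sketch (not part of this changeset): how the helpers above fit
# together. A @unfilteredmethod always runs against the unfiltered repo, and
# hasunfilteredcache() probes for a cached value without computing it.
# Standalone usage; the repository path is hypothetical.
from mercurial import hg, ui as uimod
from mercurial.localrepo import hasunfilteredcache

repo = hg.repository(uimod.ui(), '/path/to/repo')
assert repo.unfiltered() is repo.unfiltered().unfiltered()
print hasunfilteredcache(repo, '_phasecache')  # False until first access
repo._phasecache                               # fills the storecache
print hasunfilteredcache(repo, '_phasecache')  # now True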
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return discovery.visiblebranchmap(self._repo)
95 95
96 96 def heads(self):
97 97 return discovery.visibleheads(self._repo)
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
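# Editor's sketch (not part of this changeset): a localpeer is what
# localrepository.peer() returns; it exposes only the modern wire-protocol
# surface over an in-process repo. Hypothetical standalone usage:
from mercurial import hg, ui as uimod
from mercurial.node import hex

repo = hg.repository(uimod.ui(), '/path/to/repo')
peer = repo.peer()
assert peer.local() is repo and peer.canpush()
print hex(peer.lookup('tip'))     # nodes cross this API in binary form
print peer.listkeys('bookmarks')  # pushkey namespace listing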
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150 filtername = None
151 151
152 152 def _baserequirements(self, create):
153 153 return self.requirements[:]
154 154
155 155 def __init__(self, baseui, path=None, create=False):
156 156 self.wvfs = scmutil.vfs(path, expand=True)
157 157 self.wopener = self.wvfs
158 158 self.root = self.wvfs.base
159 159 self.path = self.wvfs.join(".hg")
160 160 self.origroot = path
161 161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
162 162 self.vfs = scmutil.vfs(self.path)
163 163 self.opener = self.vfs
164 164 self.baseui = baseui
165 165 self.ui = baseui.copy()
166 166 # A list of callbacks to shape the phase if no data were found.
167 167 # Callbacks are in the form: func(repo, roots) --> processed root.
168 168 # This list is to be filled by extensions during repo setup.
169 169 self._phasedefaults = []
170 170 try:
171 171 self.ui.readconfig(self.join("hgrc"), self.root)
172 172 extensions.loadall(self.ui)
173 173 except IOError:
174 174 pass
175 175
176 176 if not self.vfs.isdir():
177 177 if create:
178 178 if not self.wvfs.exists():
179 179 self.wvfs.makedirs()
180 180 self.vfs.makedir(notindexed=True)
181 181 requirements = self._baserequirements(create)
182 182 if self.ui.configbool('format', 'usestore', True):
183 183 self.vfs.mkdir("store")
184 184 requirements.append("store")
185 185 if self.ui.configbool('format', 'usefncache', True):
186 186 requirements.append("fncache")
187 187 if self.ui.configbool('format', 'dotencode', True):
188 188 requirements.append('dotencode')
189 189 # create an invalid changelog
190 190 self.vfs.append(
191 191 "00changelog.i",
192 192 '\0\0\0\2' # represents revlogv2
193 193 ' dummy changelog to prevent using the old repo layout'
194 194 )
195 195 if self.ui.configbool('format', 'generaldelta', False):
196 196 requirements.append("generaldelta")
197 197 requirements = set(requirements)
198 198 else:
199 199 raise error.RepoError(_("repository %s not found") % path)
200 200 elif create:
201 201 raise error.RepoError(_("repository %s already exists") % path)
202 202 else:
203 203 try:
204 204 requirements = scmutil.readrequires(self.vfs, self.supported)
205 205 except IOError, inst:
206 206 if inst.errno != errno.ENOENT:
207 207 raise
208 208 requirements = set()
209 209
210 210 self.sharedpath = self.path
211 211 try:
212 212 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
213 213 if not os.path.exists(s):
214 214 raise error.RepoError(
215 215 _('.hg/sharedpath points to nonexistent directory %s') % s)
216 216 self.sharedpath = s
217 217 except IOError, inst:
218 218 if inst.errno != errno.ENOENT:
219 219 raise
220 220
221 221 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
222 222 self.spath = self.store.path
223 223 self.svfs = self.store.vfs
224 224 self.sopener = self.svfs
225 225 self.sjoin = self.store.join
226 226 self.vfs.createmode = self.store.createmode
227 227 self._applyrequirements(requirements)
228 228 if create:
229 229 self._writerequirements()
230 230
231 231
232 232 self._branchcaches = {}
233 233 self.filterpats = {}
234 234 self._datafilters = {}
235 235 self._transref = self._lockref = self._wlockref = None
236 236
237 237 # A cache for various files under .hg/ that tracks file changes
238 238 # (used by the filecache decorator)
239 239 #
240 240 # Maps a property name to its util.filecacheentry
241 241 self._filecache = {}
242 242
243 243 # hold sets of revisions to be filtered
244 244 # should be cleared when something might have changed the filter value:
245 245 # - new changesets,
246 246 # - phase change,
247 247 # - new obsolescence marker,
248 248 # - working directory parent change,
249 249 # - bookmark changes
250 250 self.filteredrevcache = {}
251 251
252 252 def close(self):
253 253 pass
254 254
255 255 def _restrictcapabilities(self, caps):
256 256 return caps
257 257
258 258 def _applyrequirements(self, requirements):
259 259 self.requirements = requirements
260 260 self.sopener.options = dict((r, 1) for r in requirements
261 261 if r in self.openerreqs)
262 262
263 263 def _writerequirements(self):
264 264 reqfile = self.opener("requires", "w")
265 265 for r in self.requirements:
266 266 reqfile.write("%s\n" % r)
267 267 reqfile.close()
268 268
269 269 def _checknested(self, path):
270 270 """Determine if path is a legal nested repository."""
271 271 if not path.startswith(self.root):
272 272 return False
273 273 subpath = path[len(self.root) + 1:]
274 274 normsubpath = util.pconvert(subpath)
275 275
276 276 # XXX: Checking against the current working copy is wrong in
277 277 # the sense that it can reject things like
278 278 #
279 279 # $ hg cat -r 10 sub/x.txt
280 280 #
281 281 # if sub/ is no longer a subrepository in the working copy
282 282 # parent revision.
283 283 #
284 284 # However, it can of course also allow things that would have
285 285 # been rejected before, such as the above cat command if sub/
286 286 # is a subrepository now, but was a normal directory before.
287 287 # The old path auditor would have rejected by mistake since it
288 288 # panics when it sees sub/.hg/.
289 289 #
290 290 # All in all, checking against the working copy seems sensible
291 291 # since we want to prevent access to nested repositories on
292 292 # the filesystem *now*.
293 293 ctx = self[None]
294 294 parts = util.splitpath(subpath)
295 295 while parts:
296 296 prefix = '/'.join(parts)
297 297 if prefix in ctx.substate:
298 298 if prefix == normsubpath:
299 299 return True
300 300 else:
301 301 sub = ctx.sub(prefix)
302 302 return sub.checknested(subpath[len(prefix) + 1:])
303 303 else:
304 304 parts.pop()
305 305 return False
306 306
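# Editor's sketch (not part of this changeset) of the prefix walk that
# _checknested performs: path components are dropped from the right until a
# prefix matches a subrepo entry in ctx.substate.
def walkprefixes(subpath):
    parts = subpath.split('/')
    while parts:
        yield '/'.join(parts)  # 'a/b/c', then 'a/b', then 'a'
        parts.pop()

assert list(walkprefixes('a/b/c')) == ['a/b/c', 'a/b', 'a']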
307 307 def peer(self):
308 308 return localpeer(self) # not cached to avoid reference cycle
309 309
310 310 def unfiltered(self):
311 311 """Return unfiltered version of the repository
312 312
313 313 Intended to be overwritten by the filtered repo."""
314 314 return self
315 315
316 316 def filtered(self, name):
317 317 """Return a filtered version of a repository"""
318 318 # build a new class with the mixin and the current class
319 319 # (possibly a subclass of the repo)
320 320 class proxycls(repoview.repoview, self.unfiltered().__class__):
321 321 pass
322 322 return proxycls(self, name)
323 323
324 324 @repofilecache('bookmarks')
325 325 def _bookmarks(self):
326 326 return bookmarks.bmstore(self)
327 327
328 328 @repofilecache('bookmarks.current')
329 329 def _bookmarkcurrent(self):
330 330 return bookmarks.readcurrent(self)
331 331
332 332 def bookmarkheads(self, bookmark):
333 333 name = bookmark.split('@', 1)[0]
334 334 heads = []
335 335 for mark, n in self._bookmarks.iteritems():
336 336 if mark.split('@', 1)[0] == name:
337 337 heads.append(n)
338 338 return heads
339 339
340 340 @storecache('phaseroots')
341 341 def _phasecache(self):
342 342 return phases.phasecache(self, self._phasedefaults)
343 343
344 344 @storecache('obsstore')
345 345 def obsstore(self):
346 346 store = obsolete.obsstore(self.sopener)
347 347 if store and not obsolete._enabled:
348 348 # message is rare enough to not be translated
349 349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 350 self.ui.warn(msg % len(list(store)))
351 351 return store
352 352
353 353 @unfilteredpropertycache
354 354 def hiddenrevs(self):
355 355 """hiddenrevs: revs that should be hidden by command and tools
356 356
357 357 This set is carried on the repo to ease initialization and lazy
358 358 loading; it'll probably move back to changelog for efficiency and
359 359 consistency reasons.
360 360
361 361 Note that the hiddenrevs will need invalidation when
362 362 - a new changeset is added (possibly unstable above extinct)
363 363 - a new obsolete marker is added (possibly a new extinct changeset)
364 364
365 365 hidden changesets cannot have non-hidden descendants
366 366 """
367 367 hidden = set()
368 368 if self.obsstore:
369 369 ### hide extinct changesets that are not accessible by any means
370 370 hiddenquery = 'extinct() - ::(. + bookmark())'
371 371 hidden.update(self.revs(hiddenquery))
372 372 return hidden
373 373
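# Editor's sketch (not part of this changeset): the hidden set above is an
# ordinary revset query, so it can be recomputed on any repo object
# (assuming obsolete markers are enabled and present):
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')  # hypothetical path
hidden = set(repo.revs('extinct() - ::(. + bookmark())'))
print '%d changesets would be hidden' % len(hidden)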
374 374 @storecache('00changelog.i')
375 375 def changelog(self):
376 376 c = changelog.changelog(self.sopener)
377 377 if 'HG_PENDING' in os.environ:
378 378 p = os.environ['HG_PENDING']
379 379 if p.startswith(self.root):
380 380 c.readpending('00changelog.i.a')
381 381 return c
382 382
383 383 @storecache('00manifest.i')
384 384 def manifest(self):
385 385 return manifest.manifest(self.sopener)
386 386
387 387 @repofilecache('dirstate')
388 388 def dirstate(self):
389 389 warned = [0]
390 390 def validate(node):
391 391 try:
392 392 self.changelog.rev(node)
393 393 return node
394 394 except error.LookupError:
395 395 if not warned[0]:
396 396 warned[0] = True
397 397 self.ui.warn(_("warning: ignoring unknown"
398 398 " working parent %s!\n") % short(node))
399 399 return nullid
400 400
401 401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
402 402
403 403 def __getitem__(self, changeid):
404 404 if changeid is None:
405 405 return context.workingctx(self)
406 406 return context.changectx(self, changeid)
407 407
408 408 def __contains__(self, changeid):
409 409 try:
410 410 return bool(self.lookup(changeid))
411 411 except error.RepoLookupError:
412 412 return False
413 413
414 414 def __nonzero__(self):
415 415 return True
416 416
417 417 def __len__(self):
418 418 return len(self.changelog)
419 419
420 420 def __iter__(self):
421 421 return iter(self.changelog)
422 422
423 423 def revs(self, expr, *args):
424 424 '''Return a list of revisions matching the given revset'''
425 425 expr = revset.formatspec(expr, *args)
426 426 m = revset.match(None, expr)
427 427 return [r for r in m(self, list(self))]
428 428
429 429 def set(self, expr, *args):
430 430 '''
431 431 Yield a context for each matching revision, after doing arg
432 432 replacement via revset.formatspec
433 433 '''
434 434 for r in self.revs(expr, *args):
435 435 yield self[r]
436 436
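# Editor's sketch (not part of this changeset): revs() and set() pass their
# arguments through revset.formatspec, so callers never interpolate revset
# strings by hand; %s quotes a string argument, %d an integer. Hypothetical
# usage on an opened repo:
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')
for ctx in repo.set('heads(branch(%s))', 'default'):
    print ctx.rev(), ctx.branch()
print repo.revs('limit(%d:, 3)', 0)  # the first three revisions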
437 437 def url(self):
438 438 return 'file:' + self.root
439 439
440 440 def hook(self, name, throw=False, **args):
441 441 return hook.hook(self.ui, self, name, throw, **args)
442 442
443 443 @unfilteredmethod
444 444 def _tag(self, names, node, message, local, user, date, extra={}):
445 445 if isinstance(names, str):
446 446 names = (names,)
447 447
448 448 branches = self.branchmap()
449 449 for name in names:
450 450 self.hook('pretag', throw=True, node=hex(node), tag=name,
451 451 local=local)
452 452 if name in branches:
453 453 self.ui.warn(_("warning: tag %s conflicts with existing"
454 454 " branch name\n") % name)
455 455
456 456 def writetags(fp, names, munge, prevtags):
457 457 fp.seek(0, 2)
458 458 if prevtags and prevtags[-1] != '\n':
459 459 fp.write('\n')
460 460 for name in names:
461 461 m = munge and munge(name) or name
462 462 if (self._tagscache.tagtypes and
463 463 name in self._tagscache.tagtypes):
464 464 old = self.tags().get(name, nullid)
465 465 fp.write('%s %s\n' % (hex(old), m))
466 466 fp.write('%s %s\n' % (hex(node), m))
467 467 fp.close()
468 468
469 469 prevtags = ''
470 470 if local:
471 471 try:
472 472 fp = self.opener('localtags', 'r+')
473 473 except IOError:
474 474 fp = self.opener('localtags', 'a')
475 475 else:
476 476 prevtags = fp.read()
477 477
478 478 # local tags are stored in the current charset
479 479 writetags(fp, names, None, prevtags)
480 480 for name in names:
481 481 self.hook('tag', node=hex(node), tag=name, local=local)
482 482 return
483 483
484 484 try:
485 485 fp = self.wfile('.hgtags', 'rb+')
486 486 except IOError, e:
487 487 if e.errno != errno.ENOENT:
488 488 raise
489 489 fp = self.wfile('.hgtags', 'ab')
490 490 else:
491 491 prevtags = fp.read()
492 492
493 493 # committed tags are stored in UTF-8
494 494 writetags(fp, names, encoding.fromlocal, prevtags)
495 495
496 496 fp.close()
497 497
498 498 self.invalidatecaches()
499 499
500 500 if '.hgtags' not in self.dirstate:
501 501 self[None].add(['.hgtags'])
502 502
503 503 m = matchmod.exact(self.root, '', ['.hgtags'])
504 504 tagnode = self.commit(message, user, date, extra=extra, match=m)
505 505
506 506 for name in names:
507 507 self.hook('tag', node=hex(node), tag=name, local=local)
508 508
509 509 return tagnode
510 510
511 511 def tag(self, names, node, message, local, user, date):
512 512 '''tag a revision with one or more symbolic names.
513 513
514 514 names is a list of strings or, when adding a single tag, names may be a
515 515 string.
516 516
517 517 if local is True, the tags are stored in a per-repository file.
518 518 otherwise, they are stored in the .hgtags file, and a new
519 519 changeset is committed with the change.
520 520
521 521 keyword arguments:
522 522
523 523 local: whether to store tags in non-version-controlled file
524 524 (default False)
525 525
526 526 message: commit message to use if committing
527 527
528 528 user: name of user to use if committing
529 529
530 530 date: date tuple to use if committing'''
531 531
532 532 if not local:
533 533 for x in self.status()[:5]:
534 534 if '.hgtags' in x:
535 535 raise util.Abort(_('working copy of .hgtags is changed '
536 536 '(please commit .hgtags manually)'))
537 537
538 538 self.tags() # instantiate the cache
539 539 self._tag(names, node, message, local, user, date)
540 540
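# Editor's sketch (not part of this changeset): external callers go through
# tag(), which checks the .hgtags working copy state before delegating to
# _tag(). Hypothetical standalone usage creating a global tag:
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')
node = repo.lookup('tip')
repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
         user='Example <user@example.com>', date=None)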
541 541 @filteredpropertycache
542 542 def _tagscache(self):
543 543 '''Returns a tagscache object that contains various tags related
544 544 caches.'''
545 545
546 546 # This simplifies its cache management by having one decorated
547 547 # function (this one) and the rest simply fetch things from it.
548 548 class tagscache(object):
549 549 def __init__(self):
550 550 # These two define the set of tags for this repository. tags
551 551 # maps tag name to node; tagtypes maps tag name to 'global' or
552 552 # 'local'. (Global tags are defined by .hgtags across all
553 553 # heads, and local tags are defined in .hg/localtags.)
554 554 # They constitute the in-memory cache of tags.
555 555 self.tags = self.tagtypes = None
556 556
557 557 self.nodetagscache = self.tagslist = None
558 558
559 559 cache = tagscache()
560 560 cache.tags, cache.tagtypes = self._findtags()
561 561
562 562 return cache
563 563
564 564 def tags(self):
565 565 '''return a mapping of tag to node'''
566 566 t = {}
567 567 if self.changelog.filteredrevs:
568 568 tags, tt = self._findtags()
569 569 else:
570 570 tags = self._tagscache.tags
571 571 for k, v in tags.iteritems():
572 572 try:
573 573 # ignore tags to unknown nodes
574 574 self.changelog.rev(v)
575 575 t[k] = v
576 576 except (error.LookupError, ValueError):
577 577 pass
578 578 return t
579 579
580 580 def _findtags(self):
581 581 '''Do the hard work of finding tags. Return a pair of dicts
582 582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
583 583 maps tag name to a string like \'global\' or \'local\'.
584 584 Subclasses or extensions are free to add their own tags, but
585 585 should be aware that the returned dicts will be retained for the
586 586 duration of the localrepo object.'''
587 587
588 588 # XXX what tagtype should subclasses/extensions use? Currently
589 589 # mq and bookmarks add tags, but do not set the tagtype at all.
590 590 # Should each extension invent its own tag type? Should there
591 591 # be one tagtype for all such "virtual" tags? Or is the status
592 592 # quo fine?
593 593
594 594 alltags = {} # map tag name to (node, hist)
595 595 tagtypes = {}
596 596
597 597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
598 598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
599 599
600 600 # Build the return dicts. Have to re-encode tag names because
601 601 # the tags module always uses UTF-8 (in order not to lose info
602 602 # writing to the cache), but the rest of Mercurial wants them in
603 603 # local encoding.
604 604 tags = {}
605 605 for (name, (node, hist)) in alltags.iteritems():
606 606 if node != nullid:
607 607 tags[encoding.tolocal(name)] = node
608 608 tags['tip'] = self.changelog.tip()
609 609 tagtypes = dict([(encoding.tolocal(name), value)
610 610 for (name, value) in tagtypes.iteritems()])
611 611 return (tags, tagtypes)
612 612
613 613 def tagtype(self, tagname):
614 614 '''
615 615 return the type of the given tag. result can be:
616 616
617 617 'local' : a local tag
618 618 'global' : a global tag
619 619 None : tag does not exist
620 620 '''
621 621
622 622 return self._tagscache.tagtypes.get(tagname)
623 623
624 624 def tagslist(self):
625 625 '''return a list of tags ordered by revision'''
626 626 if not self._tagscache.tagslist:
627 627 l = []
628 628 for t, n in self.tags().iteritems():
629 629 r = self.changelog.rev(n)
630 630 l.append((r, t, n))
631 631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
632 632
633 633 return self._tagscache.tagslist
634 634
635 635 def nodetags(self, node):
636 636 '''return the tags associated with a node'''
637 637 if not self._tagscache.nodetagscache:
638 638 nodetagscache = {}
639 639 for t, n in self._tagscache.tags.iteritems():
640 640 nodetagscache.setdefault(n, []).append(t)
641 641 for tags in nodetagscache.itervalues():
642 642 tags.sort()
643 643 self._tagscache.nodetagscache = nodetagscache
644 644 return self._tagscache.nodetagscache.get(node, [])
645 645
646 646 def nodebookmarks(self, node):
647 647 marks = []
648 648 for bookmark, n in self._bookmarks.iteritems():
649 649 if n == node:
650 650 marks.append(bookmark)
651 651 return sorted(marks)
652 652
653 653 def branchmap(self):
654 654 '''returns a dictionary {branch: [branchheads]}'''
655 655 if self.filtername and not self.changelog.filteredrevs:
656 656 return self.unfiltered().branchmap()
657 657 branchmap.updatecache(self)
658 658 return self._branchcaches[self.filtername]
659 659
660 660
661 661 def _branchtip(self, heads):
662 662 '''return the tipmost branch head in heads'''
663 663 tip = heads[-1]
664 664 for h in reversed(heads):
665 665 if not self[h].closesbranch():
666 666 tip = h
667 667 break
668 668 return tip
669 669
670 670 def branchtip(self, branch):
671 671 '''return the tip node for a given branch'''
672 672 if branch not in self.branchmap():
673 673 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
674 674 return self._branchtip(self.branchmap()[branch])
675 675
676 676 def branchtags(self):
677 677 '''return a dict where branch names map to the tipmost head of
678 678 the branch; open heads come before closed'''
679 679 bt = {}
680 680 for bn, heads in self.branchmap().iteritems():
681 681 bt[bn] = self._branchtip(heads)
682 682 return bt
683 683
684 684 def lookup(self, key):
685 685 return self[key].node()
686 686
687 687 def lookupbranch(self, key, remote=None):
688 688 repo = remote or self
689 689 if key in repo.branchmap():
690 690 return key
691 691
692 692 repo = (remote and remote.local()) and remote or self
693 693 return repo[key].branch()
694 694
695 695 def known(self, nodes):
696 696 nm = self.changelog.nodemap
697 697 pc = self._phasecache
698 698 result = []
699 699 for n in nodes:
700 700 r = nm.get(n)
701 701 resp = not (r is None or pc.phase(self, r) >= phases.secret)
702 702 result.append(resp)
703 703 return result
704 704
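# Editor's sketch (not part of this changeset): known() backs the "known"
# wire command used during discovery; a node maps to True only if it exists
# locally and is not secret. Hypothetical usage:
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')
tip = repo.changelog.tip()
bogus = '\x01' * 20  # a node that cannot exist
print repo.known([tip, bogus])  # [True, False] when tip is not secret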
705 705 def local(self):
706 706 return self
707 707
708 708 def cancopy(self):
709 709 return self.local() # so statichttprepo's override of local() works
710 710
711 711 def join(self, f):
712 712 return os.path.join(self.path, f)
713 713
714 714 def wjoin(self, f):
715 715 return os.path.join(self.root, f)
716 716
717 717 def file(self, f):
718 718 if f[0] == '/':
719 719 f = f[1:]
720 720 return filelog.filelog(self.sopener, f)
721 721
722 722 def changectx(self, changeid):
723 723 return self[changeid]
724 724
725 725 def parents(self, changeid=None):
726 726 '''get list of changectxs for parents of changeid'''
727 727 return self[changeid].parents()
728 728
729 729 def setparents(self, p1, p2=nullid):
730 730 copies = self.dirstate.setparents(p1, p2)
731 731 if copies:
732 732 # Adjust copy records, the dirstate cannot do it, it
733 733 # requires access to parents manifests. Preserve them
734 734 # only for entries added to first parent.
735 735 pctx = self[p1]
736 736 for f in copies:
737 737 if f not in pctx and copies[f] in pctx:
738 738 self.dirstate.copy(copies[f], f)
739 739
740 740 def filectx(self, path, changeid=None, fileid=None):
741 741 """changeid can be a changeset revision, node, or tag.
742 742 fileid can be a file revision or node."""
743 743 return context.filectx(self, path, changeid, fileid)
744 744
745 745 def getcwd(self):
746 746 return self.dirstate.getcwd()
747 747
748 748 def pathto(self, f, cwd=None):
749 749 return self.dirstate.pathto(f, cwd)
750 750
751 751 def wfile(self, f, mode='r'):
752 752 return self.wopener(f, mode)
753 753
754 754 def _link(self, f):
755 755 return os.path.islink(self.wjoin(f))
756 756
757 757 def _loadfilter(self, filter):
758 758 if filter not in self.filterpats:
759 759 l = []
760 760 for pat, cmd in self.ui.configitems(filter):
761 761 if cmd == '!':
762 762 continue
763 763 mf = matchmod.match(self.root, '', [pat])
764 764 fn = None
765 765 params = cmd
766 766 for name, filterfn in self._datafilters.iteritems():
767 767 if cmd.startswith(name):
768 768 fn = filterfn
769 769 params = cmd[len(name):].lstrip()
770 770 break
771 771 if not fn:
772 772 fn = lambda s, c, **kwargs: util.filter(s, c)
773 773 # Wrap old filters not supporting keyword arguments
774 774 if not inspect.getargspec(fn)[2]:
775 775 oldfn = fn
776 776 fn = lambda s, c, **kwargs: oldfn(s, c)
777 777 l.append((mf, fn, params))
778 778 self.filterpats[filter] = l
779 779 return self.filterpats[filter]
780 780
781 781 def _filter(self, filterpats, filename, data):
782 782 for mf, fn, cmd in filterpats:
783 783 if mf(filename):
784 784 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
785 785 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
786 786 break
787 787
788 788 return data
789 789
790 790 @unfilteredpropertycache
791 791 def _encodefilterpats(self):
792 792 return self._loadfilter('encode')
793 793
794 794 @unfilteredpropertycache
795 795 def _decodefilterpats(self):
796 796 return self._loadfilter('decode')
797 797
798 798 def adddatafilter(self, name, filter):
799 799 self._datafilters[name] = filter
800 800
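# Editor's sketch (not part of this changeset): extensions register extra
# data filters via adddatafilter(); _loadfilter() then matches them against
# the [encode]/[decode] config sections by name prefix. A hypothetical
# line-ending filter, modeled on the eol extension's naming convention:
def crlfencode(s, cmd, ui=None, repo=None, filename=None):
    # normalize to LF when reading from the working directory
    return s.replace('\r\n', '\n')

def reposetup(ui, repo):
    repo.adddatafilter('crlfencode:', crlfencode)
    # hgrc counterpart:
    #   [encode]
    #   **.txt = crlfencode: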
801 801 def wread(self, filename):
802 802 if self._link(filename):
803 803 data = os.readlink(self.wjoin(filename))
804 804 else:
805 805 data = self.wopener.read(filename)
806 806 return self._filter(self._encodefilterpats, filename, data)
807 807
808 808 def wwrite(self, filename, data, flags):
809 809 data = self._filter(self._decodefilterpats, filename, data)
810 810 if 'l' in flags:
811 811 self.wopener.symlink(data, filename)
812 812 else:
813 813 self.wopener.write(filename, data)
814 814 if 'x' in flags:
815 815 util.setflags(self.wjoin(filename), False, True)
816 816
817 817 def wwritedata(self, filename, data):
818 818 return self._filter(self._decodefilterpats, filename, data)
819 819
820 820 def transaction(self, desc):
821 821 tr = self._transref and self._transref() or None
822 822 if tr and tr.running():
823 823 return tr.nest()
824 824
825 825 # abort here if the journal already exists
826 826 if os.path.exists(self.sjoin("journal")):
827 827 raise error.RepoError(
828 828 _("abandoned transaction found - run hg recover"))
829 829
830 830 self._writejournal(desc)
831 831 renames = [(x, undoname(x)) for x in self._journalfiles()]
832 832
833 833 tr = transaction.transaction(self.ui.warn, self.sopener,
834 834 self.sjoin("journal"),
835 835 aftertrans(renames),
836 836 self.store.createmode)
837 837 self._transref = weakref.ref(tr)
838 838 return tr
839 839
840 840 def _journalfiles(self):
841 841 return (self.sjoin('journal'), self.join('journal.dirstate'),
842 842 self.join('journal.branch'), self.join('journal.desc'),
843 843 self.join('journal.bookmarks'),
844 844 self.sjoin('journal.phaseroots'))
845 845
846 846 def undofiles(self):
847 847 return [undoname(x) for x in self._journalfiles()]
848 848
849 849 def _writejournal(self, desc):
850 850 self.opener.write("journal.dirstate",
851 851 self.opener.tryread("dirstate"))
852 852 self.opener.write("journal.branch",
853 853 encoding.fromlocal(self.dirstate.branch()))
854 854 self.opener.write("journal.desc",
855 855 "%d\n%s\n" % (len(self), desc))
856 856 self.opener.write("journal.bookmarks",
857 857 self.opener.tryread("bookmarks"))
858 858 self.sopener.write("journal.phaseroots",
859 859 self.sopener.tryread("phaseroots"))
860 860
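# Editor's sketch (not part of this changeset): undoname() and aftertrans(),
# defined near the end of this module, rename each journal.* file written
# above to its undo.* counterpart when the transaction closes; that is what
# makes 'hg rollback' possible. The mapping is mechanical (POSIX paths):
import os

def undoname_sketch(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

assert undoname_sketch('.hg/journal.dirstate') == '.hg/undo.dirstate'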
861 861 def recover(self):
862 862 lock = self.lock()
863 863 try:
864 864 if os.path.exists(self.sjoin("journal")):
865 865 self.ui.status(_("rolling back interrupted transaction\n"))
866 866 transaction.rollback(self.sopener, self.sjoin("journal"),
867 867 self.ui.warn)
868 868 self.invalidate()
869 869 return True
870 870 else:
871 871 self.ui.warn(_("no interrupted transaction available\n"))
872 872 return False
873 873 finally:
874 874 lock.release()
875 875
876 876 def rollback(self, dryrun=False, force=False):
877 877 wlock = lock = None
878 878 try:
879 879 wlock = self.wlock()
880 880 lock = self.lock()
881 881 if os.path.exists(self.sjoin("undo")):
882 882 return self._rollback(dryrun, force)
883 883 else:
884 884 self.ui.warn(_("no rollback information available\n"))
885 885 return 1
886 886 finally:
887 887 release(lock, wlock)
888 888
889 889 @unfilteredmethod # Until we get smarter cache management
890 890 def _rollback(self, dryrun, force):
891 891 ui = self.ui
892 892 try:
893 893 args = self.opener.read('undo.desc').splitlines()
894 894 (oldlen, desc, detail) = (int(args[0]), args[1], None)
895 895 if len(args) >= 3:
896 896 detail = args[2]
897 897 oldtip = oldlen - 1
898 898
899 899 if detail and ui.verbose:
900 900 msg = (_('repository tip rolled back to revision %s'
901 901 ' (undo %s: %s)\n')
902 902 % (oldtip, desc, detail))
903 903 else:
904 904 msg = (_('repository tip rolled back to revision %s'
905 905 ' (undo %s)\n')
906 906 % (oldtip, desc))
907 907 except IOError:
908 908 msg = _('rolling back unknown transaction\n')
909 909 desc = None
910 910
911 911 if not force and self['.'] != self['tip'] and desc == 'commit':
912 912 raise util.Abort(
913 913 _('rollback of last commit while not checked out '
914 914 'may lose data'), hint=_('use -f to force'))
915 915
916 916 ui.status(msg)
917 917 if dryrun:
918 918 return 0
919 919
920 920 parents = self.dirstate.parents()
921 921 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
922 922 if os.path.exists(self.join('undo.bookmarks')):
923 923 util.rename(self.join('undo.bookmarks'),
924 924 self.join('bookmarks'))
925 925 if os.path.exists(self.sjoin('undo.phaseroots')):
926 926 util.rename(self.sjoin('undo.phaseroots'),
927 927 self.sjoin('phaseroots'))
928 928 self.invalidate()
929 929
930 930 # Discard all cache entries to force reloading everything.
931 931 self._filecache.clear()
932 932
933 933 parentgone = (parents[0] not in self.changelog.nodemap or
934 934 parents[1] not in self.changelog.nodemap)
935 935 if parentgone:
936 936 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
937 937 try:
938 938 branch = self.opener.read('undo.branch')
939 939 self.dirstate.setbranch(encoding.tolocal(branch))
940 940 except IOError:
941 941 ui.warn(_('named branch could not be reset: '
942 942 'current branch is still \'%s\'\n')
943 943 % self.dirstate.branch())
944 944
945 945 self.dirstate.invalidate()
946 946 parents = tuple([p.rev() for p in self.parents()])
947 947 if len(parents) > 1:
948 948 ui.status(_('working directory now based on '
949 949 'revisions %d and %d\n') % parents)
950 950 else:
951 951 ui.status(_('working directory now based on '
952 952 'revision %d\n') % parents)
953 953 # TODO: if we know which new heads may result from this rollback, pass
954 954 # them to destroy(), which will prevent the branchhead cache from being
955 955 # invalidated.
956 956 self.destroyed()
957 957 return 0
958 958
959 959 def invalidatecaches(self):
960 960
961 961 if '_tagscache' in vars(self):
962 962 # can't use delattr on proxy
963 963 del self.__dict__['_tagscache']
964 964
965 965 self.unfiltered()._branchcaches.clear()
966 966 self.invalidatevolatilesets()
967 967
968 968 def invalidatevolatilesets(self):
969 969 self.filteredrevcache.clear()
970 970 obsolete.clearobscaches(self)
971 971 if 'hiddenrevs' in vars(self):
972 972 del self.hiddenrevs
973 973
974 974 def invalidatedirstate(self):
975 975 '''Invalidates the dirstate, causing the next call to dirstate
976 976 to check if it was modified since the last time it was read,
977 977 rereading it if it has.
978 978
979 979 This is different from dirstate.invalidate() in that it doesn't
980 980 always reread the dirstate. Use dirstate.invalidate() if you want to
981 981 explicitly read the dirstate again (i.e. restoring it to a previous
982 982 known good state).'''
983 983 if hasunfilteredcache(self, 'dirstate'):
984 984 for k in self.dirstate._filecache:
985 985 try:
986 986 delattr(self.dirstate, k)
987 987 except AttributeError:
988 988 pass
989 989 delattr(self.unfiltered(), 'dirstate')
990 990
991 991 def invalidate(self):
992 992 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
993 993 for k in self._filecache:
994 994 # dirstate is invalidated separately in invalidatedirstate()
995 995 if k == 'dirstate':
996 996 continue
997 997
998 998 try:
999 999 delattr(unfiltered, k)
1000 1000 except AttributeError:
1001 1001 pass
1002 1002 self.invalidatecaches()
1003 1003
1004 1004 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1005 1005 try:
1006 1006 l = lock.lock(lockname, 0, releasefn, desc=desc)
1007 1007 except error.LockHeld, inst:
1008 1008 if not wait:
1009 1009 raise
1010 1010 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1011 1011 (desc, inst.locker))
1012 1012 # default to 600 seconds timeout
1013 1013 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1014 1014 releasefn, desc=desc)
1015 1015 if acquirefn:
1016 1016 acquirefn()
1017 1017 return l
1018 1018
1019 1019 def _afterlock(self, callback):
1020 1020 """add a callback to the current repository lock.
1021 1021
1022 1022 The callback will be executed on lock release."""
1023 1023 l = self._lockref and self._lockref()
1024 1024 if l:
1025 1025 l.postrelease.append(callback)
1026 1026 else:
1027 1027 callback()
1028 1028
1029 1029 def lock(self, wait=True):
1030 1030 '''Lock the repository store (.hg/store) and return a weak reference
1031 1031 to the lock. Use this before modifying the store (e.g. committing or
1032 1032 stripping). If you are opening a transaction, get a lock as well.'''
1033 1033 l = self._lockref and self._lockref()
1034 1034 if l is not None and l.held:
1035 1035 l.lock()
1036 1036 return l
1037 1037
1038 1038 def unlock():
1039 1039 self.store.write()
1040 1040 if hasunfilteredcache(self, '_phasecache'):
1041 1041 self._phasecache.write()
1042 1042 for k, ce in self._filecache.items():
1043 1043 if k == 'dirstate':
1044 1044 continue
1045 1045 ce.refresh()
1046 1046
1047 1047 l = self._lock(self.sjoin("lock"), wait, unlock,
1048 1048 self.invalidate, _('repository %s') % self.origroot)
1049 1049 self._lockref = weakref.ref(l)
1050 1050 return l
1051 1051
1052 1052 def wlock(self, wait=True):
1053 1053 '''Lock the non-store parts of the repository (everything under
1054 1054 .hg except .hg/store) and return a weak reference to the lock.
1055 1055 Use this before modifying files in .hg.'''
1056 1056 l = self._wlockref and self._wlockref()
1057 1057 if l is not None and l.held:
1058 1058 l.lock()
1059 1059 return l
1060 1060
1061 1061 def unlock():
1062 1062 self.dirstate.write()
1063 1063 ce = self._filecache.get('dirstate')
1064 1064 if ce:
1065 1065 ce.refresh()
1066 1066
1067 1067 l = self._lock(self.join("wlock"), wait, unlock,
1068 1068 self.invalidatedirstate, _('working directory of %s') %
1069 1069 self.origroot)
1070 1070 self._wlockref = weakref.ref(l)
1071 1071 return l
1072 1072
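# Editor's sketch (not part of this changeset): the conventional ordering is
# wlock before lock, released in reverse, exactly as rollback() above does.
# A hypothetical store-modifying operation:
from mercurial.lock import release

def modifystore(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('example')
        try:
            # ... write to the store here ...
            tr.close()
        finally:
            tr.release()
    finally:
        release(lock, wlock)  # store lock first, then the working dir lock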
1073 1073 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1074 1074 """
1075 1075 commit an individual file as part of a larger transaction
1076 1076 """
1077 1077
1078 1078 fname = fctx.path()
1079 1079 text = fctx.data()
1080 1080 flog = self.file(fname)
1081 1081 fparent1 = manifest1.get(fname, nullid)
1082 1082 fparent2 = fparent2o = manifest2.get(fname, nullid)
1083 1083
1084 1084 meta = {}
1085 1085 copy = fctx.renamed()
1086 1086 if copy and copy[0] != fname:
1087 1087 # Mark the new revision of this file as a copy of another
1088 1088 # file. This copy data will effectively act as a parent
1089 1089 # of this new revision. If this is a merge, the first
1090 1090 # parent will be the nullid (meaning "look up the copy data")
1091 1091 # and the second one will be the other parent. For example:
1092 1092 #
1093 1093 # 0 --- 1 --- 3 rev1 changes file foo
1094 1094 # \ / rev2 renames foo to bar and changes it
1095 1095 # \- 2 -/ rev3 should have bar with all changes and
1096 1096 # should record that bar descends from
1097 1097 # bar in rev2 and foo in rev1
1098 1098 #
1099 1099 # this allows this merge to succeed:
1100 1100 #
1101 1101 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1102 1102 # \ / merging rev3 and rev4 should use bar@rev2
1103 1103 # \- 2 --- 4 as the merge base
1104 1104 #
1105 1105
1106 1106 cfname = copy[0]
1107 1107 crev = manifest1.get(cfname)
1108 1108 newfparent = fparent2
1109 1109
1110 1110 if manifest2: # branch merge
1111 1111 if fparent2 == nullid or crev is None: # copied on remote side
1112 1112 if cfname in manifest2:
1113 1113 crev = manifest2[cfname]
1114 1114 newfparent = fparent1
1115 1115
1116 1116 # find source in nearest ancestor if we've lost track
1117 1117 if not crev:
1118 1118 self.ui.debug(" %s: searching for copy revision for %s\n" %
1119 1119 (fname, cfname))
1120 1120 for ancestor in self[None].ancestors():
1121 1121 if cfname in ancestor:
1122 1122 crev = ancestor[cfname].filenode()
1123 1123 break
1124 1124
1125 1125 if crev:
1126 1126 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1127 1127 meta["copy"] = cfname
1128 1128 meta["copyrev"] = hex(crev)
1129 1129 fparent1, fparent2 = nullid, newfparent
1130 1130 else:
1131 1131 self.ui.warn(_("warning: can't find ancestor for '%s' "
1132 1132 "copied from '%s'!\n") % (fname, cfname))
1133 1133
1134 1134 elif fparent2 != nullid:
1135 1135 # is one parent an ancestor of the other?
1136 1136 fparentancestor = flog.ancestor(fparent1, fparent2)
1137 1137 if fparentancestor == fparent1:
1138 1138 fparent1, fparent2 = fparent2, nullid
1139 1139 elif fparentancestor == fparent2:
1140 1140 fparent2 = nullid
1141 1141
1142 1142 # is the file changed?
1143 1143 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1144 1144 changelist.append(fname)
1145 1145 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1146 1146
1147 1147 # are just the flags changed during merge?
1148 1148 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1149 1149 changelist.append(fname)
1150 1150
1151 1151 return fparent1
1152 1152
1153 1153 @unfilteredmethod
1154 1154 def commit(self, text="", user=None, date=None, match=None, force=False,
1155 1155 editor=False, extra={}):
1156 1156 """Add a new revision to current repository.
1157 1157
1158 1158 Revision information is gathered from the working directory,
1159 1159 match can be used to filter the committed files. If editor is
1160 1160 supplied, it is called to get a commit message.
1161 1161 """
1162 1162
1163 1163 def fail(f, msg):
1164 1164 raise util.Abort('%s: %s' % (f, msg))
1165 1165
1166 1166 if not match:
1167 1167 match = matchmod.always(self.root, '')
1168 1168
1169 1169 if not force:
1170 1170 vdirs = []
1171 1171 match.dir = vdirs.append
1172 1172 match.bad = fail
1173 1173
1174 1174 wlock = self.wlock()
1175 1175 try:
1176 1176 wctx = self[None]
1177 1177 merge = len(wctx.parents()) > 1
1178 1178
1179 1179 if (not force and merge and match and
1180 1180 (match.files() or match.anypats())):
1181 1181 raise util.Abort(_('cannot partially commit a merge '
1182 1182 '(do not specify files or patterns)'))
1183 1183
1184 1184 changes = self.status(match=match, clean=force)
1185 1185 if force:
1186 1186 changes[0].extend(changes[6]) # mq may commit unchanged files
1187 1187
1188 1188 # check subrepos
1189 1189 subs = []
1190 1190 commitsubs = set()
1191 1191 newstate = wctx.substate.copy()
1192 1192 # only manage subrepos and .hgsubstate if .hgsub is present
1193 1193 if '.hgsub' in wctx:
1194 1194 # we'll decide whether to track this ourselves, thanks
1195 1195 if '.hgsubstate' in changes[0]:
1196 1196 changes[0].remove('.hgsubstate')
1197 1197 if '.hgsubstate' in changes[2]:
1198 1198 changes[2].remove('.hgsubstate')
1199 1199
1200 1200 # compare current state to last committed state
1201 1201 # build new substate based on last committed state
1202 1202 oldstate = wctx.p1().substate
1203 1203 for s in sorted(newstate.keys()):
1204 1204 if not match(s):
1205 1205 # ignore working copy, use old state if present
1206 1206 if s in oldstate:
1207 1207 newstate[s] = oldstate[s]
1208 1208 continue
1209 1209 if not force:
1210 1210 raise util.Abort(
1211 1211 _("commit with new subrepo %s excluded") % s)
1212 1212 if wctx.sub(s).dirty(True):
1213 1213 if not self.ui.configbool('ui', 'commitsubrepos'):
1214 1214 raise util.Abort(
1215 1215 _("uncommitted changes in subrepo %s") % s,
1216 1216 hint=_("use --subrepos for recursive commit"))
1217 1217 subs.append(s)
1218 1218 commitsubs.add(s)
1219 1219 else:
1220 1220 bs = wctx.sub(s).basestate()
1221 1221 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1222 1222 if oldstate.get(s, (None, None, None))[1] != bs:
1223 1223 subs.append(s)
1224 1224
1225 1225 # check for removed subrepos
1226 1226 for p in wctx.parents():
1227 1227 r = [s for s in p.substate if s not in newstate]
1228 1228 subs += [s for s in r if match(s)]
1229 1229 if subs:
1230 1230 if (not match('.hgsub') and
1231 1231 '.hgsub' in (wctx.modified() + wctx.added())):
1232 1232 raise util.Abort(
1233 1233 _("can't commit subrepos without .hgsub"))
1234 1234 changes[0].insert(0, '.hgsubstate')
1235 1235
1236 1236 elif '.hgsub' in changes[2]:
1237 1237 # clean up .hgsubstate when .hgsub is removed
1238 1238 if ('.hgsubstate' in wctx and
1239 1239 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1240 1240 changes[2].insert(0, '.hgsubstate')
1241 1241
1242 1242 # make sure all explicit patterns are matched
1243 1243 if not force and match.files():
1244 1244 matched = set(changes[0] + changes[1] + changes[2])
1245 1245
1246 1246 for f in match.files():
1247 1247 f = self.dirstate.normalize(f)
1248 1248 if f == '.' or f in matched or f in wctx.substate:
1249 1249 continue
1250 1250 if f in changes[3]: # missing
1251 1251 fail(f, _('file not found!'))
1252 1252 if f in vdirs: # visited directory
1253 1253 d = f + '/'
1254 1254 for mf in matched:
1255 1255 if mf.startswith(d):
1256 1256 break
1257 1257 else:
1258 1258 fail(f, _("no match under directory!"))
1259 1259 elif f not in self.dirstate:
1260 1260 fail(f, _("file not tracked!"))
1261 1261
1262 1262 if (not force and not extra.get("close") and not merge
1263 1263 and not (changes[0] or changes[1] or changes[2])
1264 1264 and wctx.branch() == wctx.p1().branch()):
1265 1265 return None
1266 1266
1267 1267 if merge and changes[3]:
1268 1268 raise util.Abort(_("cannot commit merge with missing files"))
1269 1269
1270 1270 ms = mergemod.mergestate(self)
1271 1271 for f in changes[0]:
1272 1272 if f in ms and ms[f] == 'u':
1273 1273 raise util.Abort(_("unresolved merge conflicts "
1274 1274 "(see hg help resolve)"))
1275 1275
1276 1276 cctx = context.workingctx(self, text, user, date, extra, changes)
1277 1277 if editor:
1278 1278 cctx._text = editor(self, cctx, subs)
1279 1279 edited = (text != cctx._text)
1280 1280
1281 1281 # commit subs and write new state
1282 1282 if subs:
1283 1283 for s in sorted(commitsubs):
1284 1284 sub = wctx.sub(s)
1285 1285 self.ui.status(_('committing subrepository %s\n') %
1286 1286 subrepo.subrelpath(sub))
1287 1287 sr = sub.commit(cctx._text, user, date)
1288 1288 newstate[s] = (newstate[s][0], sr)
1289 1289 subrepo.writestate(self, newstate)
1290 1290
1291 1291 # Save commit message in case this transaction gets rolled back
1292 1292 # (e.g. by a pretxncommit hook). Leave the content alone on
1293 1293 # the assumption that the user will use the same editor again.
1294 1294 msgfn = self.savecommitmessage(cctx._text)
1295 1295
1296 1296 p1, p2 = self.dirstate.parents()
1297 1297 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1298 1298 try:
1299 1299 self.hook("precommit", throw=True, parent1=hookp1,
1300 1300 parent2=hookp2)
1301 1301 ret = self.commitctx(cctx, True)
1302 1302 except: # re-raises
1303 1303 if edited:
1304 1304 self.ui.write(
1305 1305 _('note: commit message saved in %s\n') % msgfn)
1306 1306 raise
1307 1307
1308 1308 # update bookmarks, dirstate and mergestate
1309 1309 bookmarks.update(self, [p1, p2], ret)
1310 1310 for f in changes[0] + changes[1]:
1311 1311 self.dirstate.normal(f)
1312 1312 for f in changes[2]:
1313 1313 self.dirstate.drop(f)
1314 1314 self.dirstate.setparents(ret)
1315 1315 ms.reset()
1316 1316 finally:
1317 1317 wlock.release()
1318 1318
1319 1319 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1320 1320 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1321 1321 self._afterlock(commithook)
1322 1322 return ret
1323 1323
1324 1324 @unfilteredmethod
1325 1325 def commitctx(self, ctx, error=False):
1326 1326 """Add a new revision to current repository.
1327 1327 Revision information is passed via the context argument.
1328 1328 """
1329 1329
1330 1330 tr = lock = None
1331 1331 removed = list(ctx.removed())
1332 1332 p1, p2 = ctx.p1(), ctx.p2()
1333 1333 user = ctx.user()
1334 1334
1335 1335 lock = self.lock()
1336 1336 try:
1337 1337 tr = self.transaction("commit")
1338 1338 trp = weakref.proxy(tr)
1339 1339
1340 1340 if ctx.files():
1341 1341 m1 = p1.manifest().copy()
1342 1342 m2 = p2.manifest()
1343 1343
1344 1344 # check in files
1345 1345 new = {}
1346 1346 changed = []
1347 1347 linkrev = len(self)
1348 1348 for f in sorted(ctx.modified() + ctx.added()):
1349 1349 self.ui.note(f + "\n")
1350 1350 try:
1351 1351 fctx = ctx[f]
1352 1352 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1353 1353 changed)
1354 1354 m1.set(f, fctx.flags())
1355 1355 except OSError, inst:
1356 1356 self.ui.warn(_("trouble committing %s!\n") % f)
1357 1357 raise
1358 1358 except IOError, inst:
1359 1359 errcode = getattr(inst, 'errno', errno.ENOENT)
1360 1360 if error or errcode and errcode != errno.ENOENT:
1361 1361 self.ui.warn(_("trouble committing %s!\n") % f)
1362 1362 raise
1363 1363 else:
1364 1364 removed.append(f)
1365 1365
1366 1366 # update manifest
1367 1367 m1.update(new)
1368 1368 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1369 1369 drop = [f for f in removed if f in m1]
1370 1370 for f in drop:
1371 1371 del m1[f]
1372 1372 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1373 1373 p2.manifestnode(), (new, drop))
1374 1374 files = changed + removed
1375 1375 else:
1376 1376 mn = p1.manifestnode()
1377 1377 files = []
1378 1378
1379 1379 # update changelog
1380 1380 self.changelog.delayupdate()
1381 1381 n = self.changelog.add(mn, files, ctx.description(),
1382 1382 trp, p1.node(), p2.node(),
1383 1383 user, ctx.date(), ctx.extra().copy())
1384 1384 p = lambda: self.changelog.writepending() and self.root or ""
1385 1385 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1386 1386 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1387 1387 parent2=xp2, pending=p)
1388 1388 self.changelog.finalize(trp)
1389 1389 # set the new commit in its proper phase
1390 1390 targetphase = phases.newcommitphase(self.ui)
1391 1391 if targetphase:
1392 1392 # retracting the boundary does not alter parent changesets.
1393 1393 # if a parent has a higher phase, the resulting phase will
1394 1394 # be compliant anyway
1395 1395 #
1396 1396 # if minimal phase was 0 we don't need to retract anything
1397 1397 phases.retractboundary(self, targetphase, [n])
1398 1398 tr.close()
1399 1399 branchmap.updatecache(self)
1400 1400 return n
1401 1401 finally:
1402 1402 if tr:
1403 1403 tr.release()
1404 1404 lock.release()
1405 1405
1406 1406 @unfilteredmethod
1407 1407 def destroyed(self, newheadnodes=None):
1408 1408 '''Inform the repository that nodes have been destroyed.
1409 1409 Intended for use by strip and rollback, so there's a common
1410 1410 place for anything that has to be done after destroying history.
1411 1411
1412 1412 If you know the branchhead cache was up to date before nodes were removed
1413 1413 and you also know the set of candidate new heads that may have resulted
1414 1414 from the destruction, you can set newheadnodes. This will enable the
1415 1415 code to update the branchheads cache, rather than having future code
1416 1416 decide it's invalid and regenerate it from scratch.
1417 1417 '''
1418 1418 # If we have info, newheadnodes, on how to update the branch cache, do
1419 1419 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1420 1420 # will be caught the next time it is read.
1421 1421 if newheadnodes:
1422 1422 ctxgen = (self[node] for node in newheadnodes
1423 1423 if self.changelog.hasnode(node))
1424 1424 cache = self._branchcaches[None]
1425 1425 cache.update(self, ctxgen)
1426 1426 cache.write(self)
1427 1427
1428 # When one tries to:
1429 # 1) destroy nodes, thus calling this method (e.g. strip)
1430 # 2) use phasecache somewhere (e.g. commit)
1431 #
1432 # then 2) will fail because the phasecache contains nodes that were
1433 # removed. We can either remove phasecache from the filecache,
1434 # causing it to reload next time it is accessed, or simply filter
1435 # the removed nodes now and write the updated cache.
1436 if '_phasecache' in self._filecache:
1437 self._phasecache.filterunknown(self)
1438 self._phasecache.write()
1439
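# Editor's sketch (not part of this changeset): the failure mode the block
# above addresses. repair.strip() destroys nodes and ends up here via
# destroyed(); a later commit in the same process then consults the
# phasecache, which previously still referenced the stripped nodes.
# Hypothetical reproduction:
from mercurial import hg, ui as uimod, repair

u = uimod.ui()
repo = hg.repository(u, '/path/to/repo')
repo._phasecache                 # populate the phasecache filecache entry
repair.strip(u, repo, repo.lookup('tip'))
repo.commit(text='after strip')  # safe now: stale nodes were filtered out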
1428 1440 # Ensure the persistent tag cache is updated. Doing it now
1429 1441 # means that the tag cache only has to worry about destroyed
1430 1442 # heads immediately after a strip/rollback. That in turn
1431 1443 # guarantees that "cachetip == currenttip" (comparing both rev
1432 1444 # and node) always means no nodes have been added or destroyed.
1433 1445
1434 1446 # XXX this is suboptimal when qrefresh'ing: we strip the current
1435 1447 # head, refresh the tag cache, then immediately add a new head.
1436 1448 # But I think doing it this way is necessary for the "instant
1437 1449 # tag cache retrieval" case to work.
1438 1450 self.invalidatecaches()
1439 1451
1440 1452 # Discard all cache entries to force reloading everything.
1441 1453 self._filecache.clear()
1442 1454
1443 1455 def walk(self, match, node=None):
1444 1456 '''
1445 1457 walk recursively through the directory tree or a given
1446 1458 changeset, finding all files matched by the match
1447 1459 function
1448 1460 '''
1449 1461 return self[node].walk(match)
1450 1462
1451 1463 def status(self, node1='.', node2=None, match=None,
1452 1464 ignored=False, clean=False, unknown=False,
1453 1465 listsubrepos=False):
1454 1466 """return status of files between two nodes or node and working
1455 1467 directory.
1456 1468
1457 1469 If node1 is None, use the first dirstate parent instead.
1458 1470 If node2 is None, compare node1 with working directory.
1459 1471 """
1460 1472
1461 1473 def mfmatches(ctx):
1462 1474 mf = ctx.manifest().copy()
1463 1475 if match.always():
1464 1476 return mf
1465 1477 for fn in mf.keys():
1466 1478 if not match(fn):
1467 1479 del mf[fn]
1468 1480 return mf
1469 1481
1470 1482 if isinstance(node1, context.changectx):
1471 1483 ctx1 = node1
1472 1484 else:
1473 1485 ctx1 = self[node1]
1474 1486 if isinstance(node2, context.changectx):
1475 1487 ctx2 = node2
1476 1488 else:
1477 1489 ctx2 = self[node2]
1478 1490
1479 1491 working = ctx2.rev() is None
1480 1492 parentworking = working and ctx1 == self['.']
1481 1493 match = match or matchmod.always(self.root, self.getcwd())
1482 1494 listignored, listclean, listunknown = ignored, clean, unknown
1483 1495
1484 1496 # load earliest manifest first for caching reasons
1485 1497 if not working and ctx2.rev() < ctx1.rev():
1486 1498 ctx2.manifest()
1487 1499
1488 1500 if not parentworking:
1489 1501 def bad(f, msg):
1490 1502 # 'f' may be a directory pattern from 'match.files()',
1491 1503 # so 'f not in ctx1' is not enough
1492 1504 if f not in ctx1 and f not in ctx1.dirs():
1493 1505 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1494 1506 match.bad = bad
1495 1507
1496 1508 if working: # we need to scan the working dir
1497 1509 subrepos = []
1498 1510 if '.hgsub' in self.dirstate:
1499 1511 subrepos = ctx2.substate.keys()
1500 1512 s = self.dirstate.status(match, subrepos, listignored,
1501 1513 listclean, listunknown)
1502 1514 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1503 1515
1504 1516 # check for any possibly clean files
1505 1517 if parentworking and cmp:
1506 1518 fixup = []
1507 1519 # do a full compare of any files that might have changed
1508 1520 for f in sorted(cmp):
1509 1521 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1510 1522 or ctx1[f].cmp(ctx2[f])):
1511 1523 modified.append(f)
1512 1524 else:
1513 1525 fixup.append(f)
1514 1526
1515 1527 # update dirstate for files that are actually clean
1516 1528 if fixup:
1517 1529 if listclean:
1518 1530 clean += fixup
1519 1531
1520 1532 try:
1521 1533 # updating the dirstate is optional
1522 1534 # so we don't wait on the lock
1523 1535 wlock = self.wlock(False)
1524 1536 try:
1525 1537 for f in fixup:
1526 1538 self.dirstate.normal(f)
1527 1539 finally:
1528 1540 wlock.release()
1529 1541 except error.LockError:
1530 1542 pass
1531 1543
1532 1544 if not parentworking:
1533 1545 mf1 = mfmatches(ctx1)
1534 1546 if working:
1535 1547 # we are comparing working dir against non-parent
1536 1548 # generate a pseudo-manifest for the working dir
1537 1549 mf2 = mfmatches(self['.'])
1538 1550 for f in cmp + modified + added:
1539 1551 mf2[f] = None
1540 1552 mf2.set(f, ctx2.flags(f))
1541 1553 for f in removed:
1542 1554 if f in mf2:
1543 1555 del mf2[f]
1544 1556 else:
1545 1557 # we are comparing two revisions
1546 1558 deleted, unknown, ignored = [], [], []
1547 1559 mf2 = mfmatches(ctx2)
1548 1560
1549 1561 modified, added, clean = [], [], []
1550 1562 withflags = mf1.withflags() | mf2.withflags()
1551 1563 for fn in mf2:
1552 1564 if fn in mf1:
1553 1565 if (fn not in deleted and
1554 1566 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1555 1567 (mf1[fn] != mf2[fn] and
1556 1568 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1557 1569 modified.append(fn)
1558 1570 elif listclean:
1559 1571 clean.append(fn)
1560 1572 del mf1[fn]
1561 1573 elif fn not in deleted:
1562 1574 added.append(fn)
1563 1575 removed = mf1.keys()
1564 1576
1565 1577 if working and modified and not self.dirstate._checklink:
1566 1578 # Symlink placeholders may get non-symlink-like contents
1567 1579 # via user error or dereferencing by NFS or Samba servers,
1568 1580 # so we filter out any placeholders that don't look like a
1569 1581 # symlink
1570 1582 sane = []
1571 1583 for f in modified:
1572 1584 if ctx2.flags(f) == 'l':
1573 1585 d = ctx2[f].data()
1574 1586 if len(d) >= 1024 or '\n' in d or util.binary(d):
1575 1587 self.ui.debug('ignoring suspect symlink placeholder'
1576 1588 ' "%s"\n' % f)
1577 1589 continue
1578 1590 sane.append(f)
1579 1591 modified = sane
1580 1592
1581 1593 r = modified, added, removed, deleted, unknown, ignored, clean
1582 1594
1583 1595 if listsubrepos:
1584 1596 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1585 1597 if working:
1586 1598 rev2 = None
1587 1599 else:
1588 1600 rev2 = ctx2.substate[subpath][1]
1589 1601 try:
1590 1602 submatch = matchmod.narrowmatcher(subpath, match)
1591 1603 s = sub.status(rev2, match=submatch, ignored=listignored,
1592 1604 clean=listclean, unknown=listunknown,
1593 1605 listsubrepos=True)
1594 1606 for rfiles, sfiles in zip(r, s):
1595 1607 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1596 1608 except error.LookupError:
1597 1609 self.ui.status(_("skipping missing subrepository: %s\n")
1598 1610 % subpath)
1599 1611
1600 1612 for l in r:
1601 1613 l.sort()
1602 1614 return r
1603 1615
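The symlink placeholder filter in the working-directory branch above boils down to a small predicate; a standalone sketch, with util.binary approximated by a NUL check (which is what it does for byte strings):

def sane_symlink_placeholder(data):
    # a placeholder passes only if it is short, single-line and
    # non-binary; anything else is ignored as suspect
    return len(data) < 1024 and '\n' not in data and '\0' not in data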
1604 1616 def heads(self, start=None):
1605 1617 heads = self.changelog.heads(start)
1606 1618 # sort the output in rev descending order
1607 1619 return sorted(heads, key=self.changelog.rev, reverse=True)
1608 1620
1609 1621 def branchheads(self, branch=None, start=None, closed=False):
1610 1622 '''return a (possibly filtered) list of heads for the given branch
1611 1623
1612 1624 Heads are returned in topological order, from newest to oldest.
1613 1625 If branch is None, use the dirstate branch.
1614 1626 If start is not None, return only heads reachable from start.
1615 1627 If closed is True, return heads that are marked as closed as well.
1616 1628 '''
1617 1629 if branch is None:
1618 1630 branch = self[None].branch()
1619 1631 branches = self.branchmap()
1620 1632 if branch not in branches:
1621 1633 return []
1622 1634 # the cache returns heads ordered lowest to highest
1623 1635 bheads = list(reversed(branches[branch]))
1624 1636 if start is not None:
1625 1637 # filter out the heads that cannot be reached from startrev
1626 1638 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1627 1639 bheads = [h for h in bheads if h in fbheads]
1628 1640 if not closed:
1629 1641 bheads = [h for h in bheads if not self[h].closesbranch()]
1630 1642 return bheads
1631 1643
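The filtering branchheads() applies after the branchmap lookup can be read in isolation; a sketch with stand-in callables (reachable is the set built via nodesbetween, closes_branch mirrors ctx.closesbranch()):

def filter_branch_heads(bheads, reachable, closes_branch, closed):
    if reachable is not None:
        # keep only heads reachable from the requested start node
        bheads = [h for h in bheads if h in reachable]
    if not closed:
        # drop heads whose changeset closes the branch
        bheads = [h for h in bheads if not closes_branch(h)]
    return bheads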
1632 1644 def branches(self, nodes):
1633 1645 if not nodes:
1634 1646 nodes = [self.changelog.tip()]
1635 1647 b = []
1636 1648 for n in nodes:
1637 1649 t = n
1638 1650 while True:
1639 1651 p = self.changelog.parents(n)
1640 1652 if p[1] != nullid or p[0] == nullid:
1641 1653 b.append((t, n, p[0], p[1]))
1642 1654 break
1643 1655 n = p[0]
1644 1656 return b
1645 1657
1646 1658 def between(self, pairs):
1647 1659 r = []
1648 1660
1649 1661 for top, bottom in pairs:
1650 1662 n, l, i = top, [], 0
1651 1663 f = 1
1652 1664
1653 1665 while n != bottom and n != nullid:
1654 1666 p = self.changelog.parents(n)[0]
1655 1667 if i == f:
1656 1668 l.append(n)
1657 1669 f = f * 2
1658 1670 n = p
1659 1671 i += 1
1660 1672
1661 1673 r.append(l)
1662 1674
1663 1675 return r
1664 1676
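The per-pair loop in between() samples the first-parent chain at exponentially growing distances (1, 2, 4, 8, ... steps below top); the pattern in a standalone sketch, where parent() is an assumed helper returning the first parent or None at the root:

def sample_between(top, bottom, parent):
    n, picked, i, f = top, [], 0, 1
    while n is not None and n != bottom:
        if i == f:
            # distances from top double: 1, 2, 4, 8, ...
            picked.append(n)
            f *= 2
        n = parent(n)
        i += 1
    return picked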
1665 1677 def pull(self, remote, heads=None, force=False):
1666 1678 # don't open a transaction for nothing or you break future useful
1667 1679 # rollback calls
1668 1680 tr = None
1669 1681 trname = 'pull\n' + util.hidepassword(remote.url())
1670 1682 lock = self.lock()
1671 1683 try:
1672 1684 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1673 1685 force=force)
1674 1686 common, fetch, rheads = tmp
1675 1687 if not fetch:
1676 1688 self.ui.status(_("no changes found\n"))
1677 1689 added = []
1678 1690 result = 0
1679 1691 else:
1680 1692 tr = self.transaction(trname)
1681 1693 if heads is None and list(common) == [nullid]:
1682 1694 self.ui.status(_("requesting all changes\n"))
1683 1695 elif heads is None and remote.capable('changegroupsubset'):
1684 1696 # issue1320, avoid a race if remote changed after discovery
1685 1697 heads = rheads
1686 1698
1687 1699 if remote.capable('getbundle'):
1688 1700 cg = remote.getbundle('pull', common=common,
1689 1701 heads=heads or rheads)
1690 1702 elif heads is None:
1691 1703 cg = remote.changegroup(fetch, 'pull')
1692 1704 elif not remote.capable('changegroupsubset'):
1693 1705 raise util.Abort(_("partial pull cannot be done because "
1694 1706 "other repository doesn't support "
1695 1707 "changegroupsubset."))
1696 1708 else:
1697 1709 cg = remote.changegroupsubset(fetch, heads, 'pull')
1698 1710 clstart = len(self.changelog)
1699 1711 result = self.addchangegroup(cg, 'pull', remote.url())
1700 1712 clend = len(self.changelog)
1701 1713 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1702 1714
1703 1715 # compute target subset
1704 1716 if heads is None:
1705 1717 # We pulled everything possible
1706 1718 # sync on everything common
1707 1719 subset = common + added
1708 1720 else:
1709 1721 # We pulled a specific subset
1710 1722 # sync on this subset
1711 1723 subset = heads
1712 1724
1713 1725 # Get phases data from the remote
1714 1726 remotephases = remote.listkeys('phases')
1715 1727 publishing = bool(remotephases.get('publishing', False))
1716 1728 if remotephases and not publishing:
1717 1729 # remote is new and non-publishing
1718 1730 pheads, _dr = phases.analyzeremotephases(self, subset,
1719 1731 remotephases)
1720 1732 phases.advanceboundary(self, phases.public, pheads)
1721 1733 phases.advanceboundary(self, phases.draft, subset)
1722 1734 else:
1723 1735 # Remote is old or publishing; all common changesets
1724 1736 # should be seen as public
1725 1737 phases.advanceboundary(self, phases.public, subset)
1726 1738
1727 1739 if obsolete._enabled:
1728 1740 self.ui.debug('fetching remote obsolete markers\n')
1729 1741 remoteobs = remote.listkeys('obsolete')
1730 1742 if 'dump0' in remoteobs:
1731 1743 if tr is None:
1732 1744 tr = self.transaction(trname)
1733 1745 for key in sorted(remoteobs, reverse=True):
1734 1746 if key.startswith('dump'):
1735 1747 data = base85.b85decode(remoteobs[key])
1736 1748 self.obsstore.mergemarkers(tr, data)
1737 1749 self.invalidatevolatilesets()
1738 1750 if tr is not None:
1739 1751 tr.close()
1740 1752 finally:
1741 1753 if tr is not None:
1742 1754 tr.release()
1743 1755 lock.release()
1744 1756
1745 1757 return result
1746 1758
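The phase handling at the end of pull() follows one decision: a modern non-publishing remote contributes only its public heads, and anything else pulled stays draft; an old or publishing remote makes the whole synchronized subset public. A sketch that returns the boundary moves instead of applying them (phase labels stand in for phases.public/phases.draft):

def pull_phase_moves(remotephases, subset, pheads):
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # new, non-publishing remote
        return [('public', pheads), ('draft', subset)]
    # old or publishing remote
    return [('public', subset)]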
1747 1759 def checkpush(self, force, revs):
1748 1760 """Extensions can override this function if additional checks have
1749 1761 to be performed before pushing, or call it if they override the push
1750 1762 command.
1751 1763 """
1752 1764 pass
1753 1765
1754 1766 def push(self, remote, force=False, revs=None, newbranch=False):
1755 1767 '''Push outgoing changesets (limited by revs) from the current
1756 1768 repository to remote. Return an integer:
1757 1769 - None means nothing to push
1758 1770 - 0 means HTTP error
1759 1771 - 1 means we pushed and remote head count is unchanged *or*
1760 1772 we have outgoing changesets but refused to push
1761 1773 - other values as described by addchangegroup()
1762 1774 '''
1763 1775 # there are two ways to push to remote repo:
1764 1776 #
1765 1777 # addchangegroup assumes local user can lock remote
1766 1778 # repo (local filesystem, old ssh servers).
1767 1779 #
1768 1780 # unbundle assumes local user cannot lock remote repo (new ssh
1769 1781 # servers, http servers).
1770 1782
1771 1783 if not remote.canpush():
1772 1784 raise util.Abort(_("destination does not support push"))
1773 1785 unfi = self.unfiltered()
1774 1786 # get local lock as we might write phase data
1775 1787 locallock = self.lock()
1776 1788 try:
1777 1789 self.checkpush(force, revs)
1778 1790 lock = None
1779 1791 unbundle = remote.capable('unbundle')
1780 1792 if not unbundle:
1781 1793 lock = remote.lock()
1782 1794 try:
1783 1795 # discovery
1784 1796 fci = discovery.findcommonincoming
1785 1797 commoninc = fci(unfi, remote, force=force)
1786 1798 common, inc, remoteheads = commoninc
1787 1799 fco = discovery.findcommonoutgoing
1788 1800 outgoing = fco(unfi, remote, onlyheads=revs,
1789 1801 commoninc=commoninc, force=force)
1790 1802
1791 1803
1792 1804 if not outgoing.missing:
1793 1805 # nothing to push
1794 1806 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1795 1807 ret = None
1796 1808 else:
1797 1809 # something to push
1798 1810 if not force:
1799 1811 # an empty self.obsstore means no obsolete markers,
1800 1812 # so we can skip the whole iteration
1801 1813 if unfi.obsstore:
1802 1814 # these messages are defined here to stay within the 80 char limit
1803 1815 mso = _("push includes obsolete changeset: %s!")
1804 1816 mst = "push includes %s changeset: %s!"
1805 1817 # plain versions for the i18n tool to detect them
1806 1818 _("push includes unstable changeset: %s!")
1807 1819 _("push includes bumped changeset: %s!")
1808 1820 _("push includes divergent changeset: %s!")
1809 1821 # If we are about to push and there is at least one
1810 1822 # obsolete or unstable changeset in missing, then at
1811 1823 # least one of the missing heads will be obsolete or
1812 1824 # unstable, so checking heads only is ok
1813 1825 for node in outgoing.missingheads:
1814 1826 ctx = unfi[node]
1815 1827 if ctx.obsolete():
1816 1828 raise util.Abort(mso % ctx)
1817 1829 elif ctx.troubled():
1818 1830 raise util.Abort(_(mst)
1819 1831 % (ctx.troubles()[0],
1820 1832 ctx))
1821 1833 discovery.checkheads(unfi, remote, outgoing,
1822 1834 remoteheads, newbranch,
1823 1835 bool(inc))
1824 1836
1825 1837 # create a changegroup from local
1826 1838 if revs is None and not outgoing.excluded:
1827 1839 # push everything,
1828 1840 # use the fast path, no race possible on push
1829 1841 cg = self._changegroup(outgoing.missing, 'push')
1830 1842 else:
1831 1843 cg = self.getlocalbundle('push', outgoing)
1832 1844
1833 1845 # apply changegroup to remote
1834 1846 if unbundle:
1835 1847 # local repo finds heads on server, finds out what
1836 1848 # revs it must push. Once revs are transferred, if the
1837 1849 # server finds it has different heads (someone else won
1838 1850 # the commit/push race), the server aborts.
1839 1851 if force:
1840 1852 remoteheads = ['force']
1841 1853 # ssh: return remote's addchangegroup()
1842 1854 # http: return remote's addchangegroup() or 0 for error
1843 1855 ret = remote.unbundle(cg, remoteheads, 'push')
1844 1856 else:
1845 1857 # we return an integer indicating remote head count
1846 1858 # change
1847 1859 ret = remote.addchangegroup(cg, 'push', self.url())
1848 1860
1849 1861 if ret:
1850 1862 # push succeeded, synchronize the target of the push
1851 1863 cheads = outgoing.missingheads
1852 1864 elif revs is None:
1853 1865 # All-out push failed; synchronize all common heads
1854 1866 cheads = outgoing.commonheads
1855 1867 else:
1856 1868 # I want cheads = heads(::missingheads and ::commonheads)
1857 1869 # (missingheads is revs with secret changeset filtered out)
1858 1870 #
1859 1871 # This can be expressed as:
1860 1872 # cheads = ( (missingheads and ::commonheads)
1861 1873 # + (commonheads and ::missingheads))
1862 1874 #
1863 1875 #
1864 1876 # while trying to push we already computed the following:
1865 1877 # common = (::commonheads)
1866 1878 # missing = ((commonheads::missingheads) - commonheads)
1867 1879 #
1868 1880 # We can pick:
1869 1881 # * missingheads part of common (::commonheads)
1870 1882 common = set(outgoing.common)
1871 1883 cheads = [node for node in revs if node in common]
1872 1884 # and
1873 1885 # * commonheads parents on missing
1874 1886 revset = unfi.set('%ln and parents(roots(%ln))',
1875 1887 outgoing.commonheads,
1876 1888 outgoing.missing)
1877 1889 cheads.extend(c.node() for c in revset)
1878 1890 # even when we don't push, exchanging phase data is useful
1879 1891 remotephases = remote.listkeys('phases')
1880 1892 if not remotephases: # old server or public only repo
1881 1893 phases.advanceboundary(self, phases.public, cheads)
1882 1894 # don't push any phase data as there is nothing to push
1883 1895 else:
1884 1896 ana = phases.analyzeremotephases(self, cheads, remotephases)
1885 1897 pheads, droots = ana
1886 1898 ### Apply remote phase on local
1887 1899 if remotephases.get('publishing', False):
1888 1900 phases.advanceboundary(self, phases.public, cheads)
1889 1901 else: # publish = False
1890 1902 phases.advanceboundary(self, phases.public, pheads)
1891 1903 phases.advanceboundary(self, phases.draft, cheads)
1892 1904 ### Apply local phase on remote
1893 1905
1894 1906 # Get the list of all revs draft on remote but public here.
1895 1907 # XXX Beware that the revset breaks if droots are not strict
1896 1908 # XXX roots; we may want to ensure they are, but it is costly
1897 1909 outdated = unfi.set('heads((%ln::%ln) and public())',
1898 1910 droots, cheads)
1899 1911 for newremotehead in outdated:
1900 1912 r = remote.pushkey('phases',
1901 1913 newremotehead.hex(),
1902 1914 str(phases.draft),
1903 1915 str(phases.public))
1904 1916 if not r:
1905 1917 self.ui.warn(_('updating %s to public failed!\n')
1906 1918 % newremotehead)
1907 1919 self.ui.debug('try to push obsolete markers to remote\n')
1908 1920 if (obsolete._enabled and self.obsstore and
1909 1921 'obsolete' in remote.listkeys('namespaces')):
1910 1922 rslts = []
1911 1923 remotedata = self.listkeys('obsolete')
1912 1924 for key in sorted(remotedata, reverse=True):
1913 1925 # reverse sort to ensure we end with dump0
1914 1926 data = remotedata[key]
1915 1927 rslts.append(remote.pushkey('obsolete', key, '', data))
1916 1928 if [r for r in rslts if not r]:
1917 1929 msg = _('failed to push some obsolete markers!\n')
1918 1930 self.ui.warn(msg)
1919 1931 finally:
1920 1932 if lock is not None:
1921 1933 lock.release()
1922 1934 finally:
1923 1935 locallock.release()
1924 1936
1925 1937 self.ui.debug("checking for updated bookmarks\n")
1926 1938 rb = remote.listkeys('bookmarks')
1927 1939 for k in rb.keys():
1928 1940 if k in unfi._bookmarks:
1929 1941 nr, nl = rb[k], hex(self._bookmarks[k])
1930 1942 if nr in unfi:
1931 1943 cr = unfi[nr]
1932 1944 cl = unfi[nl]
1933 1945 if bookmarks.validdest(unfi, cr, cl):
1934 1946 r = remote.pushkey('bookmarks', k, nr, nl)
1935 1947 if r:
1936 1948 self.ui.status(_("updating bookmark %s\n") % k)
1937 1949 else:
1938 1950 self.ui.warn(_('updating bookmark %s'
1939 1951 ' failed!\n') % k)
1940 1952
1941 1953 return ret
1942 1954
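For callers, the return contract in the push() docstring can be decoded mechanically; a hypothetical convenience helper, not part of the localrepo API:

def describe_push_result(ret):
    if ret is None:
        return 'nothing to push'
    if ret == 0:
        return 'HTTP error reported by the remote'
    if ret == 1:
        return 'pushed; remote head count unchanged (or push refused)'
    # any other value is an addchangegroup() head-count code
    return 'pushed; addchangegroup() returned %d' % ret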
1943 1955 def changegroupinfo(self, nodes, source):
1944 1956 if self.ui.verbose or source == 'bundle':
1945 1957 self.ui.status(_("%d changesets found\n") % len(nodes))
1946 1958 if self.ui.debugflag:
1947 1959 self.ui.debug("list of changesets:\n")
1948 1960 for node in nodes:
1949 1961 self.ui.debug("%s\n" % hex(node))
1950 1962
1951 1963 def changegroupsubset(self, bases, heads, source):
1952 1964 """Compute a changegroup consisting of all the nodes that are
1953 1965 descendants of any of the bases and ancestors of any of the heads.
1954 1966 Return a chunkbuffer object whose read() method will return
1955 1967 successive changegroup chunks.
1956 1968
1957 1969 It is fairly complex as determining which filenodes and which
1958 1970 manifest nodes need to be included for the changeset to be complete
1959 1971 is non-trivial.
1960 1972
1961 1973 Another wrinkle is doing the reverse, figuring out which changeset in
1962 1974 the changegroup a particular filenode or manifestnode belongs to.
1963 1975 """
1964 1976 cl = self.changelog
1965 1977 if not bases:
1966 1978 bases = [nullid]
1967 1979 csets, bases, heads = cl.nodesbetween(bases, heads)
1968 1980 # We assume that all ancestors of bases are known
1969 1981 common = cl.ancestors([cl.rev(n) for n in bases])
1970 1982 return self._changegroupsubset(common, csets, heads, source)
1971 1983
1972 1984 def getlocalbundle(self, source, outgoing):
1973 1985 """Like getbundle, but taking a discovery.outgoing as an argument.
1974 1986
1975 1987 This is only implemented for local repos and reuses potentially
1976 1988 precomputed sets in outgoing."""
1977 1989 if not outgoing.missing:
1978 1990 return None
1979 1991 return self._changegroupsubset(outgoing.common,
1980 1992 outgoing.missing,
1981 1993 outgoing.missingheads,
1982 1994 source)
1983 1995
1984 1996 def getbundle(self, source, heads=None, common=None):
1985 1997 """Like changegroupsubset, but returns the set difference between the
1986 1998 ancestors of heads and the ancestors of common.
1987 1999
1988 2000 If heads is None, use the local heads. If common is None, use [nullid].
1989 2001
1990 2002 The nodes in common might not all be known locally due to the way the
1991 2003 current discovery protocol works.
1992 2004 """
1993 2005 cl = self.changelog
1994 2006 if common:
1995 2007 hasnode = cl.hasnode
1996 2008 common = [n for n in common if hasnode(n)]
1997 2009 else:
1998 2010 common = [nullid]
1999 2011 if not heads:
2000 2012 heads = cl.heads()
2001 2013 return self.getlocalbundle(source,
2002 2014 discovery.outgoing(cl, common, heads))
2003 2015
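Conceptually, getbundle() transfers ancestors(heads) - ancestors(common); the same set algebra as a sketch, where ancestors_of is an assumed helper returning a node's ancestor set including the node itself:

def missing_nodes(ancestors_of, heads, common):
    have = set()
    for c in common:
        have |= ancestors_of(c)
    want = set()
    for h in heads:
        want |= ancestors_of(h)
    # exactly the changesets the recipient lacks
    return want - have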
2004 2016 @unfilteredmethod
2005 2017 def _changegroupsubset(self, commonrevs, csets, heads, source):
2006 2018
2007 2019 cl = self.changelog
2008 2020 mf = self.manifest
2009 2021 mfs = {} # needed manifests
2010 2022 fnodes = {} # needed file nodes
2011 2023 changedfiles = set()
2012 2024 fstate = ['', {}]
2013 2025 count = [0, 0]
2014 2026
2015 2027 # can we go through the fast path?
2016 2028 heads.sort()
2017 2029 if heads == sorted(self.heads()):
2018 2030 return self._changegroup(csets, source)
2019 2031
2020 2032 # slow path
2021 2033 self.hook('preoutgoing', throw=True, source=source)
2022 2034 self.changegroupinfo(csets, source)
2023 2035
2024 2036 # filter any nodes that claim to be part of the known set
2025 2037 def prune(revlog, missing):
2026 2038 rr, rl = revlog.rev, revlog.linkrev
2027 2039 return [n for n in missing
2028 2040 if rl(rr(n)) not in commonrevs]
2029 2041
2030 2042 progress = self.ui.progress
2031 2043 _bundling = _('bundling')
2032 2044 _changesets = _('changesets')
2033 2045 _manifests = _('manifests')
2034 2046 _files = _('files')
2035 2047
2036 2048 def lookup(revlog, x):
2037 2049 if revlog == cl:
2038 2050 c = cl.read(x)
2039 2051 changedfiles.update(c[3])
2040 2052 mfs.setdefault(c[0], x)
2041 2053 count[0] += 1
2042 2054 progress(_bundling, count[0],
2043 2055 unit=_changesets, total=count[1])
2044 2056 return x
2045 2057 elif revlog == mf:
2046 2058 clnode = mfs[x]
2047 2059 mdata = mf.readfast(x)
2048 2060 for f, n in mdata.iteritems():
2049 2061 if f in changedfiles:
2050 2062 fnodes[f].setdefault(n, clnode)
2051 2063 count[0] += 1
2052 2064 progress(_bundling, count[0],
2053 2065 unit=_manifests, total=count[1])
2054 2066 return clnode
2055 2067 else:
2056 2068 progress(_bundling, count[0], item=fstate[0],
2057 2069 unit=_files, total=count[1])
2058 2070 return fstate[1][x]
2059 2071
2060 2072 bundler = changegroup.bundle10(lookup)
2061 2073 reorder = self.ui.config('bundle', 'reorder', 'auto')
2062 2074 if reorder == 'auto':
2063 2075 reorder = None
2064 2076 else:
2065 2077 reorder = util.parsebool(reorder)
2066 2078
2067 2079 def gengroup():
2068 2080 # Create a changenode group generator that will call our functions
2069 2081 # back to look up the owning changenode and collect information.
2070 2082 count[:] = [0, len(csets)]
2071 2083 for chunk in cl.group(csets, bundler, reorder=reorder):
2072 2084 yield chunk
2073 2085 progress(_bundling, None)
2074 2086
2075 2087 # Create a generator for the manifestnodes that calls our lookup
2076 2088 # and data collection functions back.
2077 2089 for f in changedfiles:
2078 2090 fnodes[f] = {}
2079 2091 count[:] = [0, len(mfs)]
2080 2092 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2081 2093 yield chunk
2082 2094 progress(_bundling, None)
2083 2095
2084 2096 mfs.clear()
2085 2097
2086 2098 # Go through all our files in order sorted by name.
2087 2099 count[:] = [0, len(changedfiles)]
2088 2100 for fname in sorted(changedfiles):
2089 2101 filerevlog = self.file(fname)
2090 2102 if not len(filerevlog):
2091 2103 raise util.Abort(_("empty or missing revlog for %s")
2092 2104 % fname)
2093 2105 fstate[0] = fname
2094 2106 fstate[1] = fnodes.pop(fname, {})
2095 2107
2096 2108 nodelist = prune(filerevlog, fstate[1])
2097 2109 if nodelist:
2098 2110 count[0] += 1
2099 2111 yield bundler.fileheader(fname)
2100 2112 for chunk in filerevlog.group(nodelist, bundler, reorder):
2101 2113 yield chunk
2102 2114
2103 2115 # Signal that no more groups are left.
2104 2116 yield bundler.close()
2105 2117 progress(_bundling, None)
2106 2118
2107 2119 if csets:
2108 2120 self.hook('outgoing', node=hex(csets[0]), source=source)
2109 2121
2110 2122 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2111 2123
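The prune() helper above is the heart of the slow path: a manifest or file node is bundled only if the changeset that introduced it is not already common. As a standalone sketch, with linkrev_of mapping a node to its introducing changelog revision:

def prune_known(nodes, linkrev_of, commonrevs):
    # drop nodes whose introducing changeset the recipient already has
    return [n for n in nodes if linkrev_of(n) not in commonrevs]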
2112 2124 def changegroup(self, basenodes, source):
2113 2125 # to avoid a race we use changegroupsubset() (issue1320)
2114 2126 return self.changegroupsubset(basenodes, self.heads(), source)
2115 2127
2116 2128 @unfilteredmethod
2117 2129 def _changegroup(self, nodes, source):
2118 2130 """Compute the changegroup of all nodes that we have that a recipient
2119 2131 doesn't. Return a chunkbuffer object whose read() method will return
2120 2132 successive changegroup chunks.
2121 2133
2122 2134 This is much easier than the previous function as we can assume that
2123 2135 the recipient has any changenode we aren't sending them.
2124 2136
2125 2137 nodes is the set of nodes to send"""
2126 2138
2127 2139 cl = self.changelog
2128 2140 mf = self.manifest
2129 2141 mfs = {}
2130 2142 changedfiles = set()
2131 2143 fstate = ['']
2132 2144 count = [0, 0]
2133 2145
2134 2146 self.hook('preoutgoing', throw=True, source=source)
2135 2147 self.changegroupinfo(nodes, source)
2136 2148
2137 2149 revset = set([cl.rev(n) for n in nodes])
2138 2150
2139 2151 def gennodelst(log):
2140 2152 ln, llr = log.node, log.linkrev
2141 2153 return [ln(r) for r in log if llr(r) in revset]
2142 2154
2143 2155 progress = self.ui.progress
2144 2156 _bundling = _('bundling')
2145 2157 _changesets = _('changesets')
2146 2158 _manifests = _('manifests')
2147 2159 _files = _('files')
2148 2160
2149 2161 def lookup(revlog, x):
2150 2162 if revlog == cl:
2151 2163 c = cl.read(x)
2152 2164 changedfiles.update(c[3])
2153 2165 mfs.setdefault(c[0], x)
2154 2166 count[0] += 1
2155 2167 progress(_bundling, count[0],
2156 2168 unit=_changesets, total=count[1])
2157 2169 return x
2158 2170 elif revlog == mf:
2159 2171 count[0] += 1
2160 2172 progress(_bundling, count[0],
2161 2173 unit=_manifests, total=count[1])
2162 2174 return cl.node(revlog.linkrev(revlog.rev(x)))
2163 2175 else:
2164 2176 progress(_bundling, count[0], item=fstate[0],
2165 2177 total=count[1], unit=_files)
2166 2178 return cl.node(revlog.linkrev(revlog.rev(x)))
2167 2179
2168 2180 bundler = changegroup.bundle10(lookup)
2169 2181 reorder = self.ui.config('bundle', 'reorder', 'auto')
2170 2182 if reorder == 'auto':
2171 2183 reorder = None
2172 2184 else:
2173 2185 reorder = util.parsebool(reorder)
2174 2186
2175 2187 def gengroup():
2176 2188 '''yield a sequence of changegroup chunks (strings)'''
2177 2189 # construct a list of all changed files
2178 2190
2179 2191 count[:] = [0, len(nodes)]
2180 2192 for chunk in cl.group(nodes, bundler, reorder=reorder):
2181 2193 yield chunk
2182 2194 progress(_bundling, None)
2183 2195
2184 2196 count[:] = [0, len(mfs)]
2185 2197 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2186 2198 yield chunk
2187 2199 progress(_bundling, None)
2188 2200
2189 2201 count[:] = [0, len(changedfiles)]
2190 2202 for fname in sorted(changedfiles):
2191 2203 filerevlog = self.file(fname)
2192 2204 if not len(filerevlog):
2193 2205 raise util.Abort(_("empty or missing revlog for %s")
2194 2206 % fname)
2195 2207 fstate[0] = fname
2196 2208 nodelist = gennodelst(filerevlog)
2197 2209 if nodelist:
2198 2210 count[0] += 1
2199 2211 yield bundler.fileheader(fname)
2200 2212 for chunk in filerevlog.group(nodelist, bundler, reorder):
2201 2213 yield chunk
2202 2214 yield bundler.close()
2203 2215 progress(_bundling, None)
2204 2216
2205 2217 if nodes:
2206 2218 self.hook('outgoing', node=hex(nodes[0]), source=source)
2207 2219
2208 2220 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2209 2221
2210 2222 @unfilteredmethod
2211 2223 def addchangegroup(self, source, srctype, url, emptyok=False):
2212 2224 """Add the changegroup returned by source.read() to this repo.
2213 2225 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2214 2226 the URL of the repo where this changegroup is coming from.
2215 2227
2216 2228 Return an integer summarizing the change to this repo:
2217 2229 - nothing changed or no source: 0
2218 2230 - more heads than before: 1+added heads (2..n)
2219 2231 - fewer heads than before: -1-removed heads (-2..-n)
2220 2232 - number of heads stays the same: 1
2221 2233 """
2222 2234 def csmap(x):
2223 2235 self.ui.debug("add changeset %s\n" % short(x))
2224 2236 return len(cl)
2225 2237
2226 2238 def revmap(x):
2227 2239 return cl.rev(x)
2228 2240
2229 2241 if not source:
2230 2242 return 0
2231 2243
2232 2244 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2233 2245
2234 2246 changesets = files = revisions = 0
2235 2247 efiles = set()
2236 2248
2237 2249 # write changelog data to temp files so concurrent readers will not
2238 2250 # see an inconsistent view
2239 2251 cl = self.changelog
2240 2252 cl.delayupdate()
2241 2253 oldheads = cl.heads()
2242 2254
2243 2255 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2244 2256 try:
2245 2257 trp = weakref.proxy(tr)
2246 2258 # pull off the changeset group
2247 2259 self.ui.status(_("adding changesets\n"))
2248 2260 clstart = len(cl)
2249 2261 class prog(object):
2250 2262 step = _('changesets')
2251 2263 count = 1
2252 2264 ui = self.ui
2253 2265 total = None
2254 2266 def __call__(self):
2255 2267 self.ui.progress(self.step, self.count, unit=_('chunks'),
2256 2268 total=self.total)
2257 2269 self.count += 1
2258 2270 pr = prog()
2259 2271 source.callback = pr
2260 2272
2261 2273 source.changelogheader()
2262 2274 srccontent = cl.addgroup(source, csmap, trp)
2263 2275 if not (srccontent or emptyok):
2264 2276 raise util.Abort(_("received changelog group is empty"))
2265 2277 clend = len(cl)
2266 2278 changesets = clend - clstart
2267 2279 for c in xrange(clstart, clend):
2268 2280 efiles.update(self[c].files())
2269 2281 efiles = len(efiles)
2270 2282 self.ui.progress(_('changesets'), None)
2271 2283
2272 2284 # pull off the manifest group
2273 2285 self.ui.status(_("adding manifests\n"))
2274 2286 pr.step = _('manifests')
2275 2287 pr.count = 1
2276 2288 pr.total = changesets # manifests <= changesets
2277 2289 # no need to check for empty manifest group here:
2278 2290 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2279 2291 # no new manifest will be created and the manifest group will
2280 2292 # be empty during the pull
2281 2293 source.manifestheader()
2282 2294 self.manifest.addgroup(source, revmap, trp)
2283 2295 self.ui.progress(_('manifests'), None)
2284 2296
2285 2297 needfiles = {}
2286 2298 if self.ui.configbool('server', 'validate', default=False):
2287 2299 # validate incoming csets have their manifests
2288 2300 for cset in xrange(clstart, clend):
2289 2301 mfest = self.changelog.read(self.changelog.node(cset))[0]
2290 2302 mfest = self.manifest.readdelta(mfest)
2291 2303 # store file nodes we must see
2292 2304 for f, n in mfest.iteritems():
2293 2305 needfiles.setdefault(f, set()).add(n)
2294 2306
2295 2307 # process the files
2296 2308 self.ui.status(_("adding file changes\n"))
2297 2309 pr.step = _('files')
2298 2310 pr.count = 1
2299 2311 pr.total = efiles
2300 2312 source.callback = None
2301 2313
2302 2314 while True:
2303 2315 chunkdata = source.filelogheader()
2304 2316 if not chunkdata:
2305 2317 break
2306 2318 f = chunkdata["filename"]
2307 2319 self.ui.debug("adding %s revisions\n" % f)
2308 2320 pr()
2309 2321 fl = self.file(f)
2310 2322 o = len(fl)
2311 2323 if not fl.addgroup(source, revmap, trp):
2312 2324 raise util.Abort(_("received file revlog group is empty"))
2313 2325 revisions += len(fl) - o
2314 2326 files += 1
2315 2327 if f in needfiles:
2316 2328 needs = needfiles[f]
2317 2329 for new in xrange(o, len(fl)):
2318 2330 n = fl.node(new)
2319 2331 if n in needs:
2320 2332 needs.remove(n)
2321 2333 if not needs:
2322 2334 del needfiles[f]
2323 2335 self.ui.progress(_('files'), None)
2324 2336
2325 2337 for f, needs in needfiles.iteritems():
2326 2338 fl = self.file(f)
2327 2339 for n in needs:
2328 2340 try:
2329 2341 fl.rev(n)
2330 2342 except error.LookupError:
2331 2343 raise util.Abort(
2332 2344 _('missing file data for %s:%s - run hg verify') %
2333 2345 (f, hex(n)))
2334 2346
2335 2347 dh = 0
2336 2348 if oldheads:
2337 2349 heads = cl.heads()
2338 2350 dh = len(heads) - len(oldheads)
2339 2351 for h in heads:
2340 2352 if h not in oldheads and self[h].closesbranch():
2341 2353 dh -= 1
2342 2354 htext = ""
2343 2355 if dh:
2344 2356 htext = _(" (%+d heads)") % dh
2345 2357
2346 2358 self.ui.status(_("added %d changesets"
2347 2359 " with %d changes to %d files%s\n")
2348 2360 % (changesets, revisions, files, htext))
2349 2361 self.invalidatevolatilesets()
2350 2362
2351 2363 if changesets > 0:
2352 2364 p = lambda: cl.writepending() and self.root or ""
2353 2365 self.hook('pretxnchangegroup', throw=True,
2354 2366 node=hex(cl.node(clstart)), source=srctype,
2355 2367 url=url, pending=p)
2356 2368
2357 2369 added = [cl.node(r) for r in xrange(clstart, clend)]
2358 2370 publishing = self.ui.configbool('phases', 'publish', True)
2359 2371 if srctype == 'push':
2360 2372 # Old servers can not push the boundary themselves.
2361 2373 # New servers won't push the boundary if the changeset
2362 2374 # already existed locally as secret
2363 2375 #
2364 2376 # We should not use added here but the list of all changes
2365 2377 # in the bundle
2366 2378 if publishing:
2367 2379 phases.advanceboundary(self, phases.public, srccontent)
2368 2380 else:
2369 2381 phases.advanceboundary(self, phases.draft, srccontent)
2370 2382 phases.retractboundary(self, phases.draft, added)
2371 2383 elif srctype != 'strip':
2372 2384 # publishing only alters behavior during push
2373 2385 #
2374 2386 # strip should not touch boundary at all
2375 2387 phases.retractboundary(self, phases.draft, added)
2376 2388
2377 2389 # make changelog see real files again
2378 2390 cl.finalize(trp)
2379 2391
2380 2392 tr.close()
2381 2393
2382 2394 if changesets > 0:
2383 2395 if srctype != 'strip':
2384 2396 # During strip, the branchcache is invalid but the coming
2385 2397 # call to `destroyed` will repair it.
2386 2398 # In other cases we can safely update the cache on disk.
2387 2399 branchmap.updatecache(self)
2388 2400 def runhooks():
2389 2401 # forcefully update the on-disk branch cache
2390 2402 self.ui.debug("updating the branch cache\n")
2391 2403 self.hook("changegroup", node=hex(cl.node(clstart)),
2392 2404 source=srctype, url=url)
2393 2405
2394 2406 for n in added:
2395 2407 self.hook("incoming", node=hex(n), source=srctype,
2396 2408 url=url)
2397 2409 self._afterlock(runhooks)
2398 2410
2399 2411 finally:
2400 2412 tr.release()
2401 2413 # never return 0 here:
2402 2414 if dh < 0:
2403 2415 return dh - 1
2404 2416 else:
2405 2417 return dh + 1
2406 2418
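The closing return statement encodes the head-count delta so that 0 is never returned; the mapping and its inverse as a small sketch:

def encode_head_delta(dh):
    # 0 -> 1 (head count unchanged), +n -> n + 1, -n -> -n - 1
    return dh - 1 if dh < 0 else dh + 1

def decode_head_delta(ret):
    # inverse of the encoding above; assumes ret != 0
    return ret + 1 if ret < 0 else ret - 1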
2407 2419 def stream_in(self, remote, requirements):
2408 2420 lock = self.lock()
2409 2421 try:
2410 2422 # Save remote branchmap. We will use it later
2411 2423 # to speed up branchcache creation
2412 2424 rbranchmap = None
2413 2425 if remote.capable("branchmap"):
2414 2426 rbranchmap = remote.branchmap()
2415 2427
2416 2428 fp = remote.stream_out()
2417 2429 l = fp.readline()
2418 2430 try:
2419 2431 resp = int(l)
2420 2432 except ValueError:
2421 2433 raise error.ResponseError(
2422 2434 _('unexpected response from remote server:'), l)
2423 2435 if resp == 1:
2424 2436 raise util.Abort(_('operation forbidden by server'))
2425 2437 elif resp == 2:
2426 2438 raise util.Abort(_('locking the remote repository failed'))
2427 2439 elif resp != 0:
2428 2440 raise util.Abort(_('the server sent an unknown error code'))
2429 2441 self.ui.status(_('streaming all changes\n'))
2430 2442 l = fp.readline()
2431 2443 try:
2432 2444 total_files, total_bytes = map(int, l.split(' ', 1))
2433 2445 except (ValueError, TypeError):
2434 2446 raise error.ResponseError(
2435 2447 _('unexpected response from remote server:'), l)
2436 2448 self.ui.status(_('%d files to transfer, %s of data\n') %
2437 2449 (total_files, util.bytecount(total_bytes)))
2438 2450 handled_bytes = 0
2439 2451 self.ui.progress(_('clone'), 0, total=total_bytes)
2440 2452 start = time.time()
2441 2453 for i in xrange(total_files):
2442 2454 # XXX doesn't support '\n' or '\r' in filenames
2443 2455 l = fp.readline()
2444 2456 try:
2445 2457 name, size = l.split('\0', 1)
2446 2458 size = int(size)
2447 2459 except (ValueError, TypeError):
2448 2460 raise error.ResponseError(
2449 2461 _('unexpected response from remote server:'), l)
2450 2462 if self.ui.debugflag:
2451 2463 self.ui.debug('adding %s (%s)\n' %
2452 2464 (name, util.bytecount(size)))
2453 2465 # for backwards compat, name was partially encoded
2454 2466 ofp = self.sopener(store.decodedir(name), 'w')
2455 2467 for chunk in util.filechunkiter(fp, limit=size):
2456 2468 handled_bytes += len(chunk)
2457 2469 self.ui.progress(_('clone'), handled_bytes,
2458 2470 total=total_bytes)
2459 2471 ofp.write(chunk)
2460 2472 ofp.close()
2461 2473 elapsed = time.time() - start
2462 2474 if elapsed <= 0:
2463 2475 elapsed = 0.001
2464 2476 self.ui.progress(_('clone'), None)
2465 2477 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2466 2478 (util.bytecount(total_bytes), elapsed,
2467 2479 util.bytecount(total_bytes / elapsed)))
2468 2480
2469 2481 # new requirements = old non-format requirements +
2470 2482 # new format-related requirements
2471 2483 # from the streamed-in repository
2472 2484 requirements.update(set(self.requirements) - self.supportedformats)
2473 2485 self._applyrequirements(requirements)
2474 2486 self._writerequirements()
2475 2487
2476 2488 if rbranchmap:
2477 2489 rbheads = []
2478 2490 for bheads in rbranchmap.itervalues():
2479 2491 rbheads.extend(bheads)
2480 2492
2481 2493 if rbheads:
2482 2494 rtiprev = max((int(self.changelog.rev(node))
2483 2495 for node in rbheads))
2484 2496 cache = branchmap.branchcache(rbranchmap,
2485 2497 self[rtiprev].node(),
2486 2498 rtiprev)
2487 2499 self._branchcaches[None] = cache
2488 2500 cache.write(self.unfiltered())
2489 2501 self.invalidate()
2490 2502 return len(self.heads()) + 1
2491 2503 finally:
2492 2504 lock.release()
2493 2505
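The stream protocol consumed above is line-oriented: a status code, then a '<total_files> <total_bytes>' summary, then one '<name>\0<size>' header per file followed by that many raw bytes. Parsing the two header shapes, as a sketch:

def parse_stream_summary(line):
    # '<total_files> <total_bytes>'
    total_files, total_bytes = map(int, line.split(' ', 1))
    return total_files, total_bytes

def parse_stream_entry(line):
    # '<name>\0<size>'; filenames containing '\n' or '\r' are
    # unsupported, as noted in the loop above
    name, size = line.split('\0', 1)
    return name, int(size)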
2494 2506 def clone(self, remote, heads=[], stream=False):
2495 2507 '''clone remote repository.
2496 2508
2497 2509 keyword arguments:
2498 2510 heads: list of revs to clone (forces use of pull)
2499 2511 stream: use streaming clone if possible'''
2500 2512
2501 2513 # now, all clients that can request uncompressed clones can
2502 2514 # read repo formats supported by all servers that can serve
2503 2515 # them.
2504 2516
2505 2517 # if revlog format changes, client will have to check version
2506 2518 # and format flags on "stream" capability, and use
2507 2519 # uncompressed only if compatible.
2508 2520
2509 2521 if not stream:
2510 2522 # if the server explicitly prefers to stream (for fast LANs)
2511 2523 stream = remote.capable('stream-preferred')
2512 2524
2513 2525 if stream and not heads:
2514 2526 # 'stream' means remote revlog format is revlogv1 only
2515 2527 if remote.capable('stream'):
2516 2528 return self.stream_in(remote, set(('revlogv1',)))
2517 2529 # otherwise, 'streamreqs' contains the remote revlog format
2518 2530 streamreqs = remote.capable('streamreqs')
2519 2531 if streamreqs:
2520 2532 streamreqs = set(streamreqs.split(','))
2521 2533 # if we support it, stream in and adjust our requirements
2522 2534 if not streamreqs - self.supportedformats:
2523 2535 return self.stream_in(remote, streamreqs)
2524 2536 return self.pull(remote, heads)
2525 2537
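clone()'s streaming negotiation reduces to a pure decision function; a sketch where capable() stands in for remote.capable and returns the capability value or a falsy result:

def choose_clone_strategy(stream, heads, capable, supportedformats):
    if not stream:
        # the server may explicitly prefer streaming (fast LANs)
        stream = capable('stream-preferred')
    if stream and not heads:
        if capable('stream'):
            # bare 'stream' means the remote serves revlogv1 only
            return 'stream_in', set(['revlogv1'])
        streamreqs = capable('streamreqs')
        if streamreqs:
            reqs = set(streamreqs.split(','))
            # stream in only if we support every remote requirement
            if not reqs - supportedformats:
                return 'stream_in', reqs
    return 'pull', None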
2526 2538 def pushkey(self, namespace, key, old, new):
2527 2539 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2528 2540 old=old, new=new)
2529 2541 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2530 2542 ret = pushkey.push(self, namespace, key, old, new)
2531 2543 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2532 2544 ret=ret)
2533 2545 return ret
2534 2546
2535 2547 def listkeys(self, namespace):
2536 2548 self.hook('prelistkeys', throw=True, namespace=namespace)
2537 2549 self.ui.debug('listing keys for "%s"\n' % namespace)
2538 2550 values = pushkey.list(self, namespace)
2539 2551 self.hook('listkeys', namespace=namespace, values=values)
2540 2552 return values
2541 2553
2542 2554 def debugwireargs(self, one, two, three=None, four=None, five=None):
2543 2555 '''used to test argument passing over the wire'''
2544 2556 return "%s %s %s %s %s" % (one, two, three, four, five)
2545 2557
2546 2558 def savecommitmessage(self, text):
2547 2559 fp = self.opener('last-message.txt', 'wb')
2548 2560 try:
2549 2561 fp.write(text)
2550 2562 finally:
2551 2563 fp.close()
2552 2564 return self.pathto(fp.name[len(self.root) + 1:])
2553 2565
2554 2566 # used to avoid circular references so destructors work
2555 2567 def aftertrans(files):
2556 2568 renamefiles = [tuple(t) for t in files]
2557 2569 def a():
2558 2570 for src, dest in renamefiles:
2559 2571 try:
2560 2572 util.rename(src, dest)
2561 2573 except OSError: # journal file does not yet exist
2562 2574 pass
2563 2575 return a
2564 2576
2565 2577 def undoname(fn):
2566 2578 base, name = os.path.split(fn)
2567 2579 assert name.startswith('journal')
2568 2580 return os.path.join(base, name.replace('journal', 'undo', 1))
2569 2581
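For reference, undoname() maps a journal file to its undo counterpart; with a hypothetical store path:

>>> undoname('/repo/.hg/store/journal.phaseroots')
'/repo/.hg/store/undo.phaseroots'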
2570 2582 def instance(ui, path, create):
2571 2583 return localrepository(ui, util.urllocalpath(path), create)
2572 2584
2573 2585 def islocal(path):
2574 2586 return True
@@ -1,1139 +1,1137 b''
1 1 $ cat <<EOF >> $HGRCPATH
2 2 > [extensions]
3 3 > keyword =
4 4 > mq =
5 5 > notify =
6 6 > record =
7 7 > transplant =
8 8 > [ui]
9 9 > interactive = true
10 10 > EOF
11 11
12 12 hide outer repo
13 13 $ hg init
14 14
15 15 Run kwdemo before [keyword] files are set up
16 16 as it would succeed without uisetup otherwise
17 17
18 18 $ hg --quiet kwdemo
19 19 [extensions]
20 20 keyword =
21 21 [keyword]
22 22 demo.txt =
23 23 [keywordset]
24 24 svn = False
25 25 [keywordmaps]
26 26 Author = {author|user}
27 27 Date = {date|utcdate}
28 28 Header = {root}/{file},v {node|short} {date|utcdate} {author|user}
29 29 Id = {file|basename},v {node|short} {date|utcdate} {author|user}
30 30 RCSFile = {file|basename},v
31 31 RCSfile = {file|basename},v
32 32 Revision = {node|short}
33 33 Source = {root}/{file},v
34 34 $Author: test $
35 35 $Date: ????/??/?? ??:??:?? $ (glob)
36 36 $Header: */demo.txt,v ???????????? ????/??/?? ??:??:?? test $ (glob)
37 37 $Id: demo.txt,v ???????????? ????/??/?? ??:??:?? test $ (glob)
38 38 $RCSFile: demo.txt,v $
39 39 $RCSfile: demo.txt,v $
40 40 $Revision: ???????????? $ (glob)
41 41 $Source: */demo.txt,v $ (glob)
42 42
43 43 $ hg --quiet kwdemo "Branch = {branches}"
44 44 [extensions]
45 45 keyword =
46 46 [keyword]
47 47 demo.txt =
48 48 [keywordset]
49 49 svn = False
50 50 [keywordmaps]
51 51 Branch = {branches}
52 52 $Branch: demobranch $
53 53
54 54 $ cat <<EOF >> $HGRCPATH
55 55 > [keyword]
56 56 > ** =
57 57 > b = ignore
58 58 > i = ignore
59 59 > [hooks]
60 60 > EOF
61 61 $ cp $HGRCPATH $HGRCPATH.nohooks
62 62 > cat <<EOF >> $HGRCPATH
63 63 > commit=
64 64 > commit.test=cp a hooktest
65 65 > EOF
66 66
67 67 $ hg init Test-bndl
68 68 $ cd Test-bndl
69 69
70 70 kwshrink should exit silently in empty/invalid repo
71 71
72 72 $ hg kwshrink
73 73
74 74 Symlinks cannot be created on Windows.
75 75 A bundle to test this was made with:
76 76 hg init t
77 77 cd t
78 78 echo a > a
79 79 ln -s a sym
80 80 hg add sym
81 81 hg ci -m addsym -u mercurial
82 82 hg bundle --base null ../test-keyword.hg
83 83
84 84 $ hg pull -u "$TESTDIR"/bundles/test-keyword.hg
85 85 pulling from *test-keyword.hg (glob)
86 86 requesting all changes
87 87 adding changesets
88 88 adding manifests
89 89 adding file changes
90 90 added 1 changesets with 1 changes to 1 files
91 91 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 92
93 93 $ echo 'expand $Id$' > a
94 94 $ echo 'do not process $Id:' >> a
95 95 $ echo 'xxx $' >> a
96 96 $ echo 'ignore $Id$' > b
97 97
98 98 Output files as they were created
99 99
100 100 $ cat a b
101 101 expand $Id$
102 102 do not process $Id:
103 103 xxx $
104 104 ignore $Id$
105 105
106 106 no kwfiles
107 107
108 108 $ hg kwfiles
109 109
110 110 untracked candidates
111 111
112 112 $ hg -v kwfiles --unknown
113 113 k a
114 114
115 115 Add files and check status
116 116
117 117 $ hg addremove
118 118 adding a
119 119 adding b
120 120 $ hg status
121 121 A a
122 122 A b
123 123
124 124
125 125 Default keyword expansion including commit hook
126 126 Interrupted commit should not change state or run commit hook
127 127
128 128 $ hg --debug commit
129 129 abort: empty commit message
130 130 [255]
131 131 $ hg status
132 132 A a
133 133 A b
134 134
135 135 Commit with several checks
136 136
137 137 $ hg --debug commit -mabsym -u 'User Name <user@example.com>'
138 138 a
139 139 b
140 140 overwriting a expanding keywords
141 141 running hook commit.test: cp a hooktest
142 142 committed changeset 1:ef63ca68695bc9495032c6fda1350c71e6d256e9
143 143 $ hg status
144 144 ? hooktest
145 145 $ hg debugrebuildstate
146 146 $ hg --quiet identify
147 147 ef63ca68695b
148 148
149 149 cat files in working directory with keywords expanded
150 150
151 151 $ cat a b
152 152 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
153 153 do not process $Id:
154 154 xxx $
155 155 ignore $Id$
156 156
157 157 hg cat files and symlink, no expansion
158 158
159 159 $ hg cat sym a b && echo
160 160 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
161 161 do not process $Id:
162 162 xxx $
163 163 ignore $Id$
164 164 a
165 165
166 166 $ diff a hooktest
167 167
168 168 $ cp $HGRCPATH.nohooks $HGRCPATH
169 169 $ rm hooktest
170 170
171 171 hg status of kw-ignored binary file starting with '\1\n'
172 172
173 173 >>> open("i", "wb").write("\1\nfoo")
174 174 $ hg -q commit -Am metasep i
175 175 $ hg status
176 176 >>> open("i", "wb").write("\1\nbar")
177 177 $ hg status
178 178 M i
179 179 $ hg -q commit -m "modify metasep" i
180 180 $ hg status --rev 2:3
181 181 M i
182 182 $ touch empty
183 183 $ hg -q commit -A -m "another file"
184 184 $ hg status -A --rev 3:4 i
185 185 C i
186 186
187 187 $ hg -q strip -n 2
188 188
189 189 Test hook execution
190 190
191 191 bundle
192 192
193 193 $ hg bundle --base null ../kw.hg
194 194 2 changesets found
195 195 $ cd ..
196 196 $ hg init Test
197 197 $ cd Test
198 198
199 199 Notify on pull to check whether keywords stay as is in email,
200 200 i.e. if the patch.diff wrapper acts as it should
201 201
202 202 $ cat <<EOF >> $HGRCPATH
203 203 > [hooks]
204 204 > incoming.notify = python:hgext.notify.hook
205 205 > [notify]
206 206 > sources = pull
207 207 > diffstat = False
208 208 > maxsubject = 15
209 209 > [reposubs]
210 210 > * = Test
211 211 > EOF
212 212
213 213 Pull from bundle and trigger notify
214 214
215 215 $ hg pull -u ../kw.hg
216 216 pulling from ../kw.hg
217 217 requesting all changes
218 218 adding changesets
219 219 adding manifests
220 220 adding file changes
221 221 added 2 changesets with 3 changes to 3 files
222 222 Content-Type: text/plain; charset="us-ascii"
223 223 MIME-Version: 1.0
224 224 Content-Transfer-Encoding: 7bit
225 225 Date: * (glob)
226 226 Subject: changeset in...
227 227 From: mercurial
228 228 X-Hg-Notification: changeset a2392c293916
229 229 Message-Id: <hg.a2392c293916*> (glob)
230 230 To: Test
231 231
232 232 changeset a2392c293916 in $TESTTMP/Test (glob)
233 233 details: $TESTTMP/Test?cmd=changeset;node=a2392c293916
234 234 description:
235 235 addsym
236 236
237 237 diffs (6 lines):
238 238
239 239 diff -r 000000000000 -r a2392c293916 sym
240 240 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
241 241 +++ b/sym Sat Feb 09 20:25:47 2008 +0100
242 242 @@ -0,0 +1,1 @@
243 243 +a
244 244 \ No newline at end of file
245 245 Content-Type: text/plain; charset="us-ascii"
246 246 MIME-Version: 1.0
247 247 Content-Transfer-Encoding: 7bit
248 248 Date:* (glob)
249 249 Subject: changeset in...
250 250 From: User Name <user@example.com>
251 251 X-Hg-Notification: changeset ef63ca68695b
252 252 Message-Id: <hg.ef63ca68695b*> (glob)
253 253 To: Test
254 254
255 255 changeset ef63ca68695b in $TESTTMP/Test (glob)
256 256 details: $TESTTMP/Test?cmd=changeset;node=ef63ca68695b
257 257 description:
258 258 absym
259 259
260 260 diffs (12 lines):
261 261
262 262 diff -r a2392c293916 -r ef63ca68695b a
263 263 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
264 264 +++ b/a Thu Jan 01 00:00:00 1970 +0000
265 265 @@ -0,0 +1,3 @@
266 266 +expand $Id$
267 267 +do not process $Id:
268 268 +xxx $
269 269 diff -r a2392c293916 -r ef63ca68695b b
270 270 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
271 271 +++ b/b Thu Jan 01 00:00:00 1970 +0000
272 272 @@ -0,0 +1,1 @@
273 273 +ignore $Id$
274 274 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
275 275
276 276 $ cp $HGRCPATH.nohooks $HGRCPATH
277 277
278 278 Touch files and check with status
279 279
280 280 $ touch a b
281 281 $ hg status
282 282
283 283 Update and expand
284 284
285 285 $ rm sym a b
286 286 $ hg update -C
287 287 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
288 288 $ cat a b
289 289 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
290 290 do not process $Id:
291 291 xxx $
292 292 ignore $Id$
293 293
294 294 Check whether expansion is filewise and file mode is preserved
295 295
296 296 $ echo '$Id$' > c
297 297 $ echo 'tests for different changenodes' >> c
298 298 #if unix-permissions
299 299 $ chmod 600 c
300 300 $ ls -l c | cut -b 1-10
301 301 -rw-------
302 302 #endif
303 303
304 304 commit file c
305 305
306 306 $ hg commit -A -mcndiff -d '1 0' -u 'User Name <user@example.com>'
307 307 adding c
308 308 #if unix-permissions
309 309 $ ls -l c | cut -b 1-10
310 310 -rw-------
311 311 #endif
312 312
313 313 force expansion
314 314
315 315 $ hg -v kwexpand
316 316 overwriting a expanding keywords
317 317 overwriting c expanding keywords
318 318
319 319 compare changenodes in a and c
320 320
321 321 $ cat a c
322 322 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
323 323 do not process $Id:
324 324 xxx $
325 325 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
326 326 tests for different changenodes
327 327
328 328 record
329 329
330 330 $ echo '$Id$' > r
331 331 $ hg add r
332 332
333 333 record chunk
334 334
335 335 >>> lines = open('a', 'rb').readlines()
336 336 >>> lines.insert(1, 'foo\n')
337 337 >>> lines.append('bar\n')
338 338 >>> open('a', 'wb').writelines(lines)
339 339 $ hg record -d '10 1' -m rectest a<<EOF
340 340 > y
341 341 > y
342 342 > n
343 343 > EOF
344 344 diff --git a/a b/a
345 345 2 hunks, 2 lines changed
346 346 examine changes to 'a'? [Ynesfdaq?]
347 347 @@ -1,3 +1,4 @@
348 348 expand $Id$
349 349 +foo
350 350 do not process $Id:
351 351 xxx $
352 352 record change 1/2 to 'a'? [Ynesfdaq?]
353 353 @@ -2,2 +3,3 @@
354 354 do not process $Id:
355 355 xxx $
356 356 +bar
357 357 record change 2/2 to 'a'? [Ynesfdaq?]
358 358
359 359 $ hg identify
360 360 5f5eb23505c3+ tip
361 361 $ hg status
362 362 M a
363 363 A r
364 364
365 365 Cat modified file a
366 366
367 367 $ cat a
368 368 expand $Id: a,v 5f5eb23505c3 1970/01/01 00:00:10 test $
369 369 foo
370 370 do not process $Id:
371 371 xxx $
372 372 bar
373 373
374 374 Diff remaining chunk
375 375
376 376 $ hg diff a
377 377 diff -r 5f5eb23505c3 a
378 378 --- a/a Thu Jan 01 00:00:09 1970 -0000
379 379 +++ b/a * (glob)
380 380 @@ -2,3 +2,4 @@
381 381 foo
382 382 do not process $Id:
383 383 xxx $
384 384 +bar
385 385
386 386 $ hg rollback
387 387 repository tip rolled back to revision 2 (undo commit)
388 388 working directory now based on revision 2
389 389
390 390 Record all chunks in file a
391 391
392 392 $ echo foo > msg
393 393
394 394 - do not use "hg record -m" here!
395 395
396 396 $ hg record -l msg -d '11 1' a<<EOF
397 397 > y
398 398 > y
399 399 > y
400 400 > EOF
401 401 diff --git a/a b/a
402 402 2 hunks, 2 lines changed
403 403 examine changes to 'a'? [Ynesfdaq?]
404 404 @@ -1,3 +1,4 @@
405 405 expand $Id$
406 406 +foo
407 407 do not process $Id:
408 408 xxx $
409 409 record change 1/2 to 'a'? [Ynesfdaq?]
410 410 @@ -2,2 +3,3 @@
411 411 do not process $Id:
412 412 xxx $
413 413 +bar
414 414 record change 2/2 to 'a'? [Ynesfdaq?]
415 415
416 416 File a should be clean
417 417
418 418 $ hg status -A a
419 419 C a
420 420
421 421 rollback and revert expansion
422 422
423 423 $ cat a
424 424 expand $Id: a,v 78e0a02d76aa 1970/01/01 00:00:11 test $
425 425 foo
426 426 do not process $Id:
427 427 xxx $
428 428 bar
429 429 $ hg --verbose rollback
430 430 repository tip rolled back to revision 2 (undo commit)
431 431 working directory now based on revision 2
432 432 overwriting a expanding keywords
433 433 $ hg status a
434 434 M a
435 435 $ cat a
436 436 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
437 437 foo
438 438 do not process $Id:
439 439 xxx $
440 440 bar
441 441 $ echo '$Id$' > y
442 442 $ echo '$Id$' > z
443 443 $ hg add y
444 444 $ hg commit -Am "rollback only" z
445 445 $ cat z
446 446 $Id: z,v 45a5d3adce53 1970/01/01 00:00:00 test $
447 447 $ hg --verbose rollback
448 448 repository tip rolled back to revision 2 (undo commit)
449 449 working directory now based on revision 2
450 450 overwriting z shrinking keywords
451 451
452 452 Only z should be overwritten
453 453
454 454 $ hg status a y z
455 455 M a
456 456 A y
457 457 A z
458 458 $ cat z
459 459 $Id$
460 460 $ hg forget y z
461 461 $ rm y z
462 462
463 463 record added file alone
464 464
465 465 $ hg -v record -l msg -d '12 2' r<<EOF
466 466 > y
467 467 > EOF
468 468 diff --git a/r b/r
469 469 new file mode 100644
470 470 examine changes to 'r'? [Ynesfdaq?]
471 471 r
472 472 committed changeset 3:82a2f715724d
473 473 overwriting r expanding keywords
474 474 - status call required for dirstate.normallookup() check
475 475 $ hg status r
476 476 $ hg --verbose rollback
477 477 repository tip rolled back to revision 2 (undo commit)
478 478 working directory now based on revision 2
479 479 overwriting r shrinking keywords
480 480 $ hg forget r
481 481 $ rm msg r
482 482 $ hg update -C
483 483 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
484 484
485 485 record added keyword ignored file
486 486
487 487 $ echo '$Id$' > i
488 488 $ hg add i
489 489 $ hg --verbose record -d '13 1' -m recignored<<EOF
490 490 > y
491 491 > EOF
492 492 diff --git a/i b/i
493 493 new file mode 100644
494 494 examine changes to 'i'? [Ynesfdaq?]
495 495 i
496 496 committed changeset 3:9f40ceb5a072
497 497 $ cat i
498 498 $Id$
499 499 $ hg -q rollback
500 500 $ hg forget i
501 501 $ rm i
502 502
503 503 amend
504 504
505 505 $ echo amend >> a
506 506 $ echo amend >> b
507 507 $ hg -q commit -d '14 1' -m 'prepare amend'
508 508
509 509 $ hg --debug commit --amend -d '15 1' -m 'amend without changes' | grep keywords
510 510 overwriting a expanding keywords
511 511 $ hg -q id
512 512 67d8c481a6be
513 513 $ head -1 a
514 514 expand $Id: a,v 67d8c481a6be 1970/01/01 00:00:15 test $
515 515
516 516 $ hg -q strip -n tip
517 517
518 518 Test patch queue repo
519 519
520 520 $ hg init --mq
521 521 $ hg qimport -r tip -n mqtest.diff
522 522 $ hg commit --mq -m mqtest
523 523
524 524 Keywords should not be expanded in patch
525 525
526 526 $ cat .hg/patches/mqtest.diff
527 527 # HG changeset patch
528 528 # User User Name <user@example.com>
529 529 # Date 1 0
530 530 # Node ID 40a904bbbe4cd4ab0a1f28411e35db26341a40ad
531 531 # Parent ef63ca68695bc9495032c6fda1350c71e6d256e9
532 532 cndiff
533 533
534 534 diff -r ef63ca68695b -r 40a904bbbe4c c
535 535 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
536 536 +++ b/c Thu Jan 01 00:00:01 1970 +0000
537 537 @@ -0,0 +1,2 @@
538 538 +$Id$
539 539 +tests for different changenodes
540 540
541 541 $ hg qpop
542 542 popping mqtest.diff
543 543 patch queue now empty
544 544
545 545 qgoto, implying qpush, should expand
546 546
547 547 $ hg qgoto mqtest.diff
548 548 applying mqtest.diff
549 549 now at: mqtest.diff
550 550 $ cat c
551 551 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
552 552 tests for different changenodes
553 553 $ hg cat c
554 554 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
555 555 tests for different changenodes
556 556
557 557 Keywords should not be expanded in filelog
558 558
559 559 $ hg --config 'extensions.keyword=!' cat c
560 560 $Id$
561 561 tests for different changenodes
562 562
563 563 qpop and move on
564 564
565 565 $ hg qpop
566 566 popping mqtest.diff
567 567 patch queue now empty
568 568
569 569 Copy and show added kwfiles
570 570
571 571 $ hg cp a c
572 572 $ hg kwfiles
573 573 a
574 574 c
575 575
576 576 Commit and show expansion in original and copy
577 577
578 578 $ hg --debug commit -ma2c -d '1 0' -u 'User Name <user@example.com>'
579 579 c
580 580 c: copy a:0045e12f6c5791aac80ca6cbfd97709a88307292
581 removing unknown node 40a904bbbe4c from 1-phase boundary
582 581 overwriting c expanding keywords
583 582 committed changeset 2:25736cf2f5cbe41f6be4e6784ef6ecf9f3bbcc7d
584 583 $ cat a c
585 584 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
586 585 do not process $Id:
587 586 xxx $
588 587 expand $Id: c,v 25736cf2f5cb 1970/01/01 00:00:01 user $
589 588 do not process $Id:
590 589 xxx $
591 590
592 591 Touch copied c and check its status
593 592
594 593 $ touch c
595 594 $ hg status
596 595
597 596 Copy kwfile to keyword ignored file unexpanding keywords
598 597
599 598 $ hg --verbose copy a i
600 599 copying a to i
601 600 overwriting i shrinking keywords
602 601 $ head -n 1 i
603 602 expand $Id$
604 603 $ hg forget i
605 604 $ rm i
606 605
607 606 Copy ignored file to ignored file: no overwriting
608 607
609 608 $ hg --verbose copy b i
610 609 copying b to i
611 610 $ hg forget i
612 611 $ rm i
613 612
614 613 cp symlink file; hg cp -A symlink file (part1)
615 614 - copied symlink points to kwfile: overwrite
616 615
617 616 #if symlink
618 617 $ cp sym i
619 618 $ ls -l i
620 619 -rw-r--r--* (glob)
621 620 $ head -1 i
622 621 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
623 622 $ hg copy --after --verbose sym i
624 623 copying sym to i
625 624 overwriting i shrinking keywords
626 625 $ head -1 i
627 626 expand $Id$
628 627 $ hg forget i
629 628 $ rm i
630 629 #endif
631 630
632 631 Test different options of hg kwfiles
633 632
634 633 $ hg kwfiles
635 634 a
636 635 c
637 636 $ hg -v kwfiles --ignore
638 637 I b
639 638 I sym
640 639 $ hg kwfiles --all
641 640 K a
642 641 K c
643 642 I b
644 643 I sym
645 644
646 645 Diff specific revision
647 646
648 647 $ hg diff --rev 1
649 648 diff -r ef63ca68695b c
650 649 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
651 650 +++ b/c * (glob)
652 651 @@ -0,0 +1,3 @@
653 652 +expand $Id$
654 653 +do not process $Id:
655 654 +xxx $
656 655
657 656 Status after rollback:
658 657
659 658 $ hg rollback
660 659 repository tip rolled back to revision 1 (undo commit)
661 660 working directory now based on revision 1
662 661 $ hg status
663 662 A c
664 663 $ hg update --clean
665 664 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
666 665
667 666 #if symlink
668 667
669 668 cp symlink file; hg cp -A symlink file (part2)
670 669 - copied symlink points to kw ignored file: do not overwrite
671 670
672 671 $ cat a > i
673 672 $ ln -s i symignored
674 673 $ hg commit -Am 'fake expansion in ignored and symlink' i symignored
675 674 $ cp symignored x
676 675 $ hg copy --after --verbose symignored x
677 676 copying symignored to x
678 677 $ head -n 1 x
679 678 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
680 679 $ hg forget x
681 680 $ rm x
682 681
683 682 $ hg rollback
684 683 repository tip rolled back to revision 1 (undo commit)
685 684 working directory now based on revision 1
686 685 $ hg update --clean
687 686 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
688 687 $ rm i symignored
689 688
690 689 #endif
691 690
692 691 Custom keywordmaps as argument to kwdemo
693 692
694 693 $ hg --quiet kwdemo "Xinfo = {author}: {desc}"
695 694 [extensions]
696 695 keyword =
697 696 [keyword]
698 697 ** =
699 698 b = ignore
700 699 demo.txt =
701 700 i = ignore
702 701 [keywordset]
703 702 svn = False
704 703 [keywordmaps]
705 704 Xinfo = {author}: {desc}
706 705 $Xinfo: test: hg keyword configuration and expansion example $
707 706
708 707 Configure custom keywordmaps
709 708
710 709 $ cat <<EOF >>$HGRCPATH
711 710 > [keywordmaps]
712 711 > Id = {file} {node|short} {date|rfc822date} {author|user}
713 712 > Xinfo = {author}: {desc}
714 713 > EOF
715 714
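The Id and Xinfo maps above use template filters; {author|user}, for
instance, reduces the full author string to the short user name seen in the
expansions below. A rough sketch of that reduction (assumed equivalent,
not Mercurial's actual filter code):

  >>> author = 'User Name <user@example.com>'
  >>> author.split('<')[1].split('@')[0]
  'user'
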
716 715 Cat and hg cat files before custom expansion
717 716
718 717 $ cat a b
719 718 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
720 719 do not process $Id:
721 720 xxx $
722 721 ignore $Id$
723 722 $ hg cat sym a b && echo
724 723 expand $Id: a ef63ca68695b Thu, 01 Jan 1970 00:00:00 +0000 user $
725 724 do not process $Id:
726 725 xxx $
727 726 ignore $Id$
728 727 a
729 728
730 729 Write custom keyword and prepare multi-line commit message
731 730
732 731 $ echo '$Xinfo$' >> a
733 732 $ cat <<EOF >> log
734 733 > firstline
735 734 > secondline
736 735 > EOF
737 736
738 737 Interrupted commit should not change state
739 738
740 739 $ hg commit
741 740 abort: empty commit message
742 741 [255]
743 742 $ hg status
744 743 M a
745 744 ? c
746 745 ? log
747 746
748 747 Commit with multi-line message and custom expansion
749 748
750 749 $ hg --debug commit -l log -d '2 0' -u 'User Name <user@example.com>'
751 750 a
752 removing unknown node 40a904bbbe4c from 1-phase boundary
753 751 overwriting a expanding keywords
754 752 committed changeset 2:bb948857c743469b22bbf51f7ec8112279ca5d83
755 753 $ rm log
756 754
757 755 Stat, verify and show custom expansion (firstline)
758 756
759 757 $ hg status
760 758 ? c
761 759 $ hg verify
762 760 checking changesets
763 761 checking manifests
764 762 crosschecking files in changesets and manifests
765 763 checking files
766 764 3 files, 3 changesets, 4 total revisions
767 765 $ cat a b
768 766 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
769 767 do not process $Id:
770 768 xxx $
771 769 $Xinfo: User Name <user@example.com>: firstline $
772 770 ignore $Id$
773 771 $ hg cat sym a b && echo
774 772 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
775 773 do not process $Id:
776 774 xxx $
777 775 $Xinfo: User Name <user@example.com>: firstline $
778 776 ignore $Id$
779 777 a
780 778
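Note that $Xinfo$ expanded {desc} to only the first line of the multi-line
commit message. A trivial sketch of that truncation (illustration only):

  >>> desc = 'firstline\nsecondline'
  >>> desc.splitlines()[0]
  'firstline'
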
781 779 annotate
782 780
783 781 $ hg annotate a
784 782 1: expand $Id$
785 783 1: do not process $Id:
786 784 1: xxx $
787 785 2: $Xinfo$
788 786
789 787 remove with status checks
790 788
791 789 $ hg debugrebuildstate
792 790 $ hg remove a
793 791 $ hg --debug commit -m rma
794 792 committed changeset 3:d14c712653769de926994cf7fbb06c8fbd68f012
795 793 $ hg status
796 794 ? c
797 795
798 796 Rollback, revert, and check expansion
799 797
800 798 $ hg rollback
801 799 repository tip rolled back to revision 2 (undo commit)
802 800 working directory now based on revision 2
803 801 $ hg status
804 802 R a
805 803 ? c
806 804 $ hg revert --no-backup --rev tip a
807 805 $ cat a
808 806 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
809 807 do not process $Id:
810 808 xxx $
811 809 $Xinfo: User Name <user@example.com>: firstline $
812 810
813 811 Clone to test global and local configurations
814 812
815 813 $ cd ..
816 814
817 815 Expansion in destination with global configuration
818 816
819 817 $ hg --quiet clone Test globalconf
820 818 $ cat globalconf/a
821 819 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
822 820 do not process $Id:
823 821 xxx $
824 822 $Xinfo: User Name <user@example.com>: firstline $
825 823
826 824 No expansion in destination with keyword configuration local to the origin only
827 825
828 826 $ hg --quiet --config 'keyword.**=ignore' clone Test localconf
829 827 $ cat localconf/a
830 828 expand $Id$
831 829 do not process $Id:
832 830 xxx $
833 831 $Xinfo$
834 832
835 833 Clone to test incoming
836 834
837 835 $ hg clone -r1 Test Test-a
838 836 adding changesets
839 837 adding manifests
840 838 adding file changes
841 839 added 2 changesets with 3 changes to 3 files
842 840 updating to branch default
843 841 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
844 842 $ cd Test-a
845 843 $ cat <<EOF >> .hg/hgrc
846 844 > [paths]
847 845 > default = ../Test
848 846 > EOF
849 847 $ hg incoming
850 848 comparing with $TESTTMP/Test (glob)
851 849 searching for changes
852 850 changeset: 2:bb948857c743
853 851 tag: tip
854 852 user: User Name <user@example.com>
855 853 date: Thu Jan 01 00:00:02 1970 +0000
856 854 summary: firstline
857 855
858 856 Imported patch should not be rejected
859 857
860 858 >>> import re
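  >>> # append ' rejecttest' after each Id keyword line, so the patch
  >>> # exported below must still apply cleanly on import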
861 859 >>> text = re.sub(r'(Id.*)', r'\1 rejecttest', open('a').read())
862 860 >>> open('a', 'wb').write(text)
863 861 $ hg --debug commit -m'rejects?' -d '3 0' -u 'User Name <user@example.com>'
864 862 a
865 863 overwriting a expanding keywords
866 864 committed changeset 2:85e279d709ffc28c9fdd1b868570985fc3d87082
867 865 $ hg export -o ../rejecttest.diff tip
868 866 $ cd ../Test
869 867 $ hg import ../rejecttest.diff
870 868 applying ../rejecttest.diff
871 869 $ cat a b
872 870 expand $Id: a 4e0994474d25 Thu, 01 Jan 1970 00:00:03 +0000 user $ rejecttest
873 871 do not process $Id: rejecttest
874 872 xxx $
875 873 $Xinfo: User Name <user@example.com>: rejects? $
876 874 ignore $Id$
877 875
878 876 $ hg rollback
879 877 repository tip rolled back to revision 2 (undo import)
880 878 working directory now based on revision 2
881 879 $ hg update --clean
882 880 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
883 881
884 882 kwexpand/kwshrink on selected files
885 883
886 884 $ mkdir x
887 885 $ hg copy a x/a
888 886 $ hg --verbose kwshrink a
889 887 overwriting a shrinking keywords
890 888 - sleep required for dirstate.normal() check
891 889 $ sleep 1
892 890 $ hg status a
893 891 $ hg --verbose kwexpand a
894 892 overwriting a expanding keywords
895 893 $ hg status a
896 894
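The sleep above works around mtime granularity: dirstate.normal() records
size and mtime, so a rewrite within the same second could go unnoticed by
status. A standalone sketch of the ambiguity (hypothetical file name 't',
standard library only):

  >>> import os
  >>> open('t', 'w').write('x')
  >>> m1 = os.stat('t').st_mtime
  >>> open('t', 'w').write('y')
  >>> os.stat('t').st_mtime >= m1
  True
  >>> os.remove('t')
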
897 895 kwexpand x/a should abort
898 896
899 897 $ hg --verbose kwexpand x/a
900 898 abort: outstanding uncommitted changes
901 899 [255]
902 900 $ cd x
903 901 $ hg --debug commit -m xa -d '3 0' -u 'User Name <user@example.com>'
904 902 x/a
905 903 x/a: copy a:779c764182ce5d43e2b1eb66ce06d7b47bfe342e
906 904 overwriting x/a expanding keywords
907 905 committed changeset 3:b4560182a3f9a358179fd2d835c15e9da379c1e4
908 906 $ cat a
909 907 expand $Id: x/a b4560182a3f9 Thu, 01 Jan 1970 00:00:03 +0000 user $
910 908 do not process $Id:
911 909 xxx $
912 910 $Xinfo: User Name <user@example.com>: xa $
913 911
914 912 kwshrink a inside directory x
915 913
916 914 $ hg --verbose kwshrink a
917 915 overwriting x/a shrinking keywords
918 916 $ cat a
919 917 expand $Id$
920 918 do not process $Id:
921 919 xxx $
922 920 $Xinfo$
923 921 $ cd ..
924 922
925 923 kwexpand nonexistent
926 924
927 925 $ hg kwexpand nonexistent
928 926 nonexistent:* (glob)
929 927
930 928
931 929 #if serve
932 930 hg serve
933 931 - expand with hgweb file
934 932 - no expansion with hgweb annotate/changeset/filediff
935 933 - check errors
936 934
937 935 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
938 936 $ cat hg.pid >> $DAEMON_PIDS
939 937 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'file/tip/a/?style=raw'
940 938 200 Script output follows
941 939
942 940 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
943 941 do not process $Id:
944 942 xxx $
945 943 $Xinfo: User Name <user@example.com>: firstline $
946 944 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'annotate/tip/a/?style=raw'
947 945 200 Script output follows
948 946
949 947
950 948 user@1: expand $Id$
951 949 user@1: do not process $Id:
952 950 user@1: xxx $
953 951 user@2: $Xinfo$
954 952
955 953
956 954
957 955
958 956 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'rev/tip/?style=raw'
959 957 200 Script output follows
960 958
961 959
962 960 # HG changeset patch
963 961 # User User Name <user@example.com>
964 962 # Date 3 0
965 963 # Node ID b4560182a3f9a358179fd2d835c15e9da379c1e4
966 964 # Parent bb948857c743469b22bbf51f7ec8112279ca5d83
967 965 xa
968 966
969 967 diff -r bb948857c743 -r b4560182a3f9 x/a
970 968 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
971 969 +++ b/x/a Thu Jan 01 00:00:03 1970 +0000
972 970 @@ -0,0 +1,4 @@
973 971 +expand $Id$
974 972 +do not process $Id:
975 973 +xxx $
976 974 +$Xinfo$
977 975
978 976 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'diff/bb948857c743/a?style=raw'
979 977 200 Script output follows
980 978
981 979
982 980 diff -r ef63ca68695b -r bb948857c743 a
983 981 --- a/a Thu Jan 01 00:00:00 1970 +0000
984 982 +++ b/a Thu Jan 01 00:00:02 1970 +0000
985 983 @@ -1,3 +1,4 @@
986 984 expand $Id$
987 985 do not process $Id:
988 986 xxx $
989 987 +$Xinfo$
990 988
991 989
992 990
993 991
994 992 $ cat errors.log
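
The same raw-view check could also be scripted directly; a hypothetical
equivalent of get-with-headers.py (assumes the server started above is
still listening, HGPORT is exported, and Python 2's urllib2 is available):

  >>> import os, urllib2
  >>> url = 'http://localhost:%s/file/tip/a/?style=raw' % os.environ['HGPORT']
  >>> '$Xinfo: User Name' in urllib2.urlopen(url).read()
  True
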
995 993 #endif
996 994
997 995 Prepare merge and resolve tests
998 996
999 997 $ echo '$Id$' > m
1000 998 $ hg add m
1001 999 $ hg commit -m 4kw
1002 1000 $ echo foo >> m
1003 1001 $ hg commit -m 5foo
1004 1002
1005 1003 simplemerge
1006 1004
1007 1005 $ hg update 4
1008 1006 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1009 1007 $ echo foo >> m
1010 1008 $ hg commit -m 6foo
1011 1009 created new head
1012 1010 $ hg merge
1013 1011 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1014 1012 (branch merge, don't forget to commit)
1015 1013 $ hg commit -m simplemerge
1016 1014 $ cat m
1017 1015 $Id: m 27d48ee14f67 Thu, 01 Jan 1970 00:00:00 +0000 test $
1018 1016 foo
1019 1017
1020 1018 conflict: keyword should stay outside conflict zone
1021 1019
1022 1020 $ hg update 4
1023 1021 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1024 1022 $ echo bar >> m
1025 1023 $ hg commit -m 8bar
1026 1024 created new head
1027 1025 $ hg merge
1028 1026 merging m
1029 1027 warning: conflicts during merge.
1030 1028 merging m incomplete! (edit conflicts, then use 'hg resolve --mark')
1031 1029 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
1032 1030 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
1033 1031 [1]
1034 1032 $ cat m
1035 1033 $Id$
1036 1034 <<<<<<< local
1037 1035 bar
1038 1036 =======
1039 1037 foo
1040 1038 >>>>>>> other
1041 1039
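The $Id$ line stays outside the conflict markers because both parents store
the identical shrunk form; only the genuinely divergent lines conflict. A
trivial sketch (illustration only):

  >>> local = ['$Id$', 'bar']
  >>> other = ['$Id$', 'foo']
  >>> [l for l, o in zip(local, other) if l == o]
  ['$Id$']
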
1042 1040 resolve to local
1043 1041
1044 1042 $ HGMERGE=internal:local hg resolve -a
1045 1043 $ hg commit -m localresolve
1046 1044 $ cat m
1047 1045 $Id: m 800511b3a22d Thu, 01 Jan 1970 00:00:00 +0000 test $
1048 1046 bar
1049 1047
1050 1048 Test restricted mode with transplant -b
1051 1049
1052 1050 $ hg update 6
1053 1051 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1054 1052 $ hg branch foo
1055 1053 marked working directory as branch foo
1056 1054 (branches are permanent and global, did you want a bookmark?)
1057 1055 $ mv a a.bak
1058 1056 $ echo foobranch > a
1059 1057 $ cat a.bak >> a
1060 1058 $ rm a.bak
1061 1059 $ hg commit -m 9foobranch
1062 1060 $ hg update default
1063 1061 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1064 1062 $ hg -y transplant -b foo tip
1065 1063 applying 4aa30d025d50
1066 1064 4aa30d025d50 transplanted to e00abbf63521
1067 1065
1068 1066 Expansion in changeset but not in file
1069 1067
1070 1068 $ hg tip -p
1071 1069 changeset: 11:e00abbf63521
1072 1070 tag: tip
1073 1071 parent: 9:800511b3a22d
1074 1072 user: test
1075 1073 date: Thu Jan 01 00:00:00 1970 +0000
1076 1074 summary: 9foobranch
1077 1075
1078 1076 diff -r 800511b3a22d -r e00abbf63521 a
1079 1077 --- a/a Thu Jan 01 00:00:00 1970 +0000
1080 1078 +++ b/a Thu Jan 01 00:00:00 1970 +0000
1081 1079 @@ -1,3 +1,4 @@
1082 1080 +foobranch
1083 1081 expand $Id$
1084 1082 do not process $Id:
1085 1083 xxx $
1086 1084
1087 1085 $ head -n 2 a
1088 1086 foobranch
1089 1087 expand $Id: a e00abbf63521 Thu, 01 Jan 1970 00:00:00 +0000 test $
1090 1088
1091 1089 Turn off expansion
1092 1090
1093 1091 $ hg -q rollback
1094 1092 $ hg -q update -C
1095 1093
1096 1094 kwshrink with unknown file u
1097 1095
1098 1096 $ cp a u
1099 1097 $ hg --verbose kwshrink
1100 1098 overwriting a shrinking keywords
1101 1099 overwriting m shrinking keywords
1102 1100 overwriting x/a shrinking keywords
1103 1101
1104 1102 Keywords shrunk in working directory, but not yet disabled
1105 1103 - cat shows unexpanded keywords
1106 1104 - hg cat shows expanded keywords
1107 1105
1108 1106 $ cat a b
1109 1107 expand $Id$
1110 1108 do not process $Id:
1111 1109 xxx $
1112 1110 $Xinfo$
1113 1111 ignore $Id$
1114 1112 $ hg cat sym a b && echo
1115 1113 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
1116 1114 do not process $Id:
1117 1115 xxx $
1118 1116 $Xinfo: User Name <user@example.com>: firstline $
1119 1117 ignore $Id$
1120 1118 a
1121 1119
1122 1120 Now disable keyword expansion
1123 1121
1124 1122 $ rm "$HGRCPATH"
1125 1123 $ cat a b
1126 1124 expand $Id$
1127 1125 do not process $Id:
1128 1126 xxx $
1129 1127 $Xinfo$
1130 1128 ignore $Id$
1131 1129 $ hg cat sym a b && echo
1132 1130 expand $Id$
1133 1131 do not process $Id:
1134 1132 xxx $
1135 1133 $Xinfo$
1136 1134 ignore $Id$
1137 1135 a
1138 1136
1139 1137 $ cd ..