##// END OF EJS Templates
branchmap: update cache of 'unserved' filter on new changesets...
Pierre-Yves David -
r18394:50104481 default
parent child Browse files
Show More
@@ -1,2578 +1,2578 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
class repofilecache(filecache):
    """Filecache descriptor that always operates on the unfiltered repo.

    All filecache usage on a repo is done for logic that should be
    unfiltered, so reads, writes and deletes are redirected to
    repo.unfiltered() and every filtered view shares one cache entry.
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # Store files live under .hg/store, so resolve paths via sjoin.
        return obj.sjoin(fname)
37 37
class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        # Compute and cache the value on the unfiltered repo so all
        # filtered views see the same cached value.
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Cache directly on the (possibly filtered) proxy object so each
        # filtered view keeps its own value.
        object.__setattr__(obj, self.name, value)
49 49
50 50
def hasunfilteredcache(repo, name):
    """Return True if the unfiltered repo has a cached value under <name>."""
    return name in repo.unfiltered().__dict__
54 54
def unfilteredmethod(orig):
    """Decorator forcing a method to run against the unfiltered repo."""
    def inner(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
60 60
# Capabilities advertised by modern peers; legacy peers additionally
# support the old changegroupsubset wire command.
MODERNCAPS = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle'])
LEGACYCAPS = MODERNCAPS | set(['changegroupsubset'])
63 63
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        # NOTE(review): `caps` defaults to a shared mutable set; safe only
        # while no caller mutates the argument -- confirm.
        peer.peerrepository.__init__(self)
        # Peers expose only the 'served' filtered view of the repository.
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # A local peer can hand out the underlying repository object.
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Same as localpeer but advertises the legacy capability set.
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
    # Repository storage formats this class can read and write.
    supportedformats = set(('revlogv1', 'generaldelta'))
    # Every requirement we know how to open (formats plus store layouts).
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    # Requirements that translate into revlog opener options.
    openerreqs = set(('revlogv1', 'generaldelta'))
    # Base requirements written into newly-created repositories.
    requirements = ['revlogv1']
    # Name of the repoview filter applied; None means unfiltered.
    filtername = None
151 151
152 152 def _baserequirements(self, create):
153 153 return self.requirements[:]
154 154
    def __init__(self, baseui, path=None, create=False):
        """Open (or create, when *create* is True) the repository at *path*.

        Raises error.RepoError when the repo is missing (create=False) or
        already exists (create=True).
        """
        # Working-directory and .hg virtual filesystems.
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # Per-repo config may enable extensions; a missing hgrc is fine.
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # No requires file: treat as an empty requirement set.
                requirements = set()

        # Honour a share: .hg/sharedpath redirects the store elsewhere.
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        # Per-filter branch head caches, filled lazily by branchmap().
        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
251 251
    def close(self):
        """Release peer resources (nothing to do for a local repo)."""
        pass
254 254
    def _restrictcapabilities(self, caps):
        """Hook letting subclasses/extensions trim peer capabilities."""
        return caps
257 257
258 258 def _applyrequirements(self, requirements):
259 259 self.requirements = requirements
260 260 self.sopener.options = dict((r, 1) for r in requirements
261 261 if r in self.openerreqs)
262 262
263 263 def _writerequirements(self):
264 264 reqfile = self.opener("requires", "w")
265 265 for r in sorted(self.requirements):
266 266 reqfile.write("%s\n" % r)
267 267 reqfile.close()
268 268
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        *path* must be an absolute path under self.root; returns True only
        when the working copy declares a subrepository covering it.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # Delegate to the subrepo for deeper nesting checks.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
306 306
    def peer(self):
        """Return a localpeer view of this repo."""
        return localpeer(self) # not cached to avoid reference cycle
309 309
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
315 315
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
323 323
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # Bookmark store, cached against the .hg/bookmarks file.
        return bookmarks.bmstore(self)
327 327
    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # Name of the active bookmark, cached against bookmarks.current.
        return bookmarks.readcurrent(self)
331 331
332 332 def bookmarkheads(self, bookmark):
333 333 name = bookmark.split('@', 1)[0]
334 334 heads = []
335 335 for mark, n in self._bookmarks.iteritems():
336 336 if mark.split('@', 1)[0] == name:
337 337 heads.append(n)
338 338 return heads
339 339
    @storecache('phaseroots')
    def _phasecache(self):
        # Phase information, cached against the store's phaseroots file.
        return phases.phasecache(self, self._phasedefaults)
343 343
    @storecache('obsstore')
    def obsstore(self):
        # Obsolescence marker store, cached against the obsstore file.
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store
352 352
    @storecache('00changelog.i')
    def changelog(self):
        # The changelog, honouring pending (in-transaction) data for hooks.
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                # hooks running inside a transaction see pending changesets
                c.readpending('00changelog.i.a')
        return c
361 361
    @storecache('00manifest.i')
    def manifest(self):
        # The manifest revlog, cached against 00manifest.i.
        return manifest.manifest(self.sopener)
365 365
    @repofilecache('dirstate')
    def dirstate(self):
        # Working-directory state; unknown parents are replaced by nullid
        # with a single warning instead of aborting.
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
381 381
382 382 def __getitem__(self, changeid):
383 383 if changeid is None:
384 384 return context.workingctx(self)
385 385 return context.changectx(self, changeid)
386 386
387 387 def __contains__(self, changeid):
388 388 try:
389 389 return bool(self.lookup(changeid))
390 390 except error.RepoLookupError:
391 391 return False
392 392
    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True
395 395
    def __len__(self):
        # Number of revisions, as reported by the changelog.
        return len(self.changelog)
398 398
    def __iter__(self):
        # Iterate over revision numbers in changelog order.
        return iter(self.changelog)
401 401
402 402 def revs(self, expr, *args):
403 403 '''Return a list of revisions matching the given revset'''
404 404 expr = revset.formatspec(expr, *args)
405 405 m = revset.match(None, expr)
406 406 return [r for r in m(self, list(self))]
407 407
408 408 def set(self, expr, *args):
409 409 '''
410 410 Yield a context for each matching revision, after doing arg
411 411 replacement via revset.formatspec
412 412 '''
413 413 for r in self.revs(expr, *args):
414 414 yield self[r]
415 415
    def url(self):
        """Return the URL of this repository ('file:' + root path)."""
        return 'file:' + self.root
418 418
    def hook(self, name, throw=False, **args):
        """Run hook *name*; when *throw* is True a failing hook aborts."""
        return hook.hook(self.ui, self, name, throw, **args)
421 421
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tagging: record *names* for *node* and fire tag hooks.

        Local tags go to .hg/localtags; global tags are appended to
        .hgtags and committed. Returns the tagging changeset node (or
        None for local tags).

        NOTE(review): `extra` uses a shared mutable default dict; safe
        only while no caller mutates it -- confirm.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append tag lines, preserving any pre-existing content and
            # recording the previous value of a re-used tag name first.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
489 489
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # Refuse when .hgtags has uncommitted changes in any of the
            # modified/added/removed/deleted/unknown status buckets.
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
519 519
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, filled lazily by nodetags()/tagslist().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
542 542
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # Filtered views bypass the cache: recompute so hidden
            # changesets do not leak through cached tags.
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
558 558
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is implicit and always present.
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
591 591
592 592 def tagtype(self, tagname):
593 593 '''
594 594 return the type of the given tag. result can be:
595 595
596 596 'local' : a local tag
597 597 'global' : a global tag
598 598 None : tag does not exist
599 599 '''
600 600
601 601 return self._tagscache.tagtypes.get(tagname)
602 602
603 603 def tagslist(self):
604 604 '''return a list of tags ordered by revision'''
605 605 if not self._tagscache.tagslist:
606 606 l = []
607 607 for t, n in self.tags().iteritems():
608 608 r = self.changelog.rev(n)
609 609 l.append((r, t, n))
610 610 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
611 611
612 612 return self._tagscache.tagslist
613 613
614 614 def nodetags(self, node):
615 615 '''return the tags associated with a node'''
616 616 if not self._tagscache.nodetagscache:
617 617 nodetagscache = {}
618 618 for t, n in self._tagscache.tags.iteritems():
619 619 nodetagscache.setdefault(n, []).append(t)
620 620 for tags in nodetagscache.itervalues():
621 621 tags.sort()
622 622 self._tagscache.nodetagscache = nodetagscache
623 623 return self._tagscache.nodetagscache.get(node, [])
624 624
625 625 def nodebookmarks(self, node):
626 626 marks = []
627 627 for bookmark, n in self._bookmarks.iteritems():
628 628 if n == node:
629 629 marks.append(bookmark)
630 630 return sorted(marks)
631 631
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # Refresh (or build) the cache for the current filter level.
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
636 636
637 637
638 638 def _branchtip(self, heads):
639 639 '''return the tipmost branch head in heads'''
640 640 tip = heads[-1]
641 641 for h in reversed(heads):
642 642 if not self[h].closesbranch():
643 643 tip = h
644 644 break
645 645 return tip
646 646
647 647 def branchtip(self, branch):
648 648 '''return the tip node for a given branch'''
649 649 if branch not in self.branchmap():
650 650 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
651 651 return self._branchtip(self.branchmap()[branch])
652 652
653 653 def branchtags(self):
654 654 '''return a dict where branch names map to the tipmost head of
655 655 the branch, open heads come before closed'''
656 656 bt = {}
657 657 for bn, heads in self.branchmap().iteritems():
658 658 bt[bn] = self._branchtip(heads)
659 659 return bt
660 660
    def lookup(self, key):
        """Resolve *key* (rev, node, tag, bookmark, ...) to a node."""
        return self[key].node()
663 663
    def lookupbranch(self, key, remote=None):
        """Resolve *key* to a branch name.

        If *key* already names a branch in *remote* (or this repo when
        remote is None), return it unchanged; otherwise resolve *key* as
        a revision and return that revision's branch.
        """
        repo = remote or self
        if key in repo.branchmap():
            return key

        # Resolve the revision locally unless the remote is itself local.
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
671 671
672 672 def known(self, nodes):
673 673 nm = self.changelog.nodemap
674 674 pc = self._phasecache
675 675 result = []
676 676 for n in nodes:
677 677 r = nm.get(n)
678 678 resp = not (r is None or pc.phase(self, r) >= phases.secret)
679 679 result.append(resp)
680 680 return result
681 681
    def local(self):
        """A local repo returns itself (peers return None/underlying repo)."""
        return self
684 684
    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works
687 687
    def join(self, f):
        """Return the path of *f* inside the .hg directory."""
        return os.path.join(self.path, f)
690 690
    def wjoin(self, f):
        """Return the path of *f* inside the working directory."""
        return os.path.join(self.root, f)
693 693
    def file(self, f):
        """Return the filelog for tracked file *f* (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
698 698
    def changectx(self, changeid):
        """Return the changectx for *changeid* (alias of repo[changeid])."""
        return self[changeid]
701 701
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
705 705
    def setparents(self, p1, p2=nullid):
        """Set working-directory parents, preserving applicable copy records."""
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
716 716
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
721 721
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
724 724
    def pathto(self, f, cwd=None):
        """Return the path to *f* relative to *cwd* (dirstate semantics)."""
        return self.dirstate.pathto(f, cwd)
727 727
    def wfile(self, f, mode='r'):
        """Open working-directory file *f* with the given mode."""
        return self.wopener(f, mode)
730 730
    def _link(self, f):
        """True if working-directory file *f* is a symbolic link."""
        return os.path.islink(self.wjoin(f))
733 733
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) list for *filter*.

        *filter* is a config section name ('encode'/'decode'); each entry
        maps a file pattern to either a registered data filter or a shell
        command run through util.filter.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running the command as a shell filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
757 757
758 758 def _filter(self, filterpats, filename, data):
759 759 for mf, fn, cmd in filterpats:
760 760 if mf(filename):
761 761 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
762 762 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
763 763 break
764 764
765 765 return data
766 766
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # Filters applied when reading files from the working directory.
        return self._loadfilter('encode')
770 770
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # Filters applied when writing files to the working directory.
        return self._loadfilter('decode')
774 774
    def adddatafilter(self, name, filter):
        """Register data filter *filter* under *name* for encode/decode."""
        self._datafilters[name] = filter
777 777
778 778 def wread(self, filename):
779 779 if self._link(filename):
780 780 data = os.readlink(self.wjoin(filename))
781 781 else:
782 782 data = self.wopener.read(filename)
783 783 return self._filter(self._encodefilterpats, filename, data)
784 784
785 785 def wwrite(self, filename, data, flags):
786 786 data = self._filter(self._decodefilterpats, filename, data)
787 787 if 'l' in flags:
788 788 self.wopener.symlink(data, filename)
789 789 else:
790 790 self.wopener.write(filename, data)
791 791 if 'x' in flags:
792 792 util.setflags(self.wjoin(filename), False, True)
793 793
    def wwritedata(self, filename, data):
        """Return *data* run through the decode filters for *filename*."""
        return self._filter(self._decodefilterpats, filename, data)
796 796
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by *desc*."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # Reuse the live transaction via nesting.
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # Keep only a weak reference so the transaction can be collected.
        self._transref = weakref.ref(tr)
        return tr
816 816
817 817 def _journalfiles(self):
818 818 return (self.sjoin('journal'), self.join('journal.dirstate'),
819 819 self.join('journal.branch'), self.join('journal.desc'),
820 820 self.join('journal.bookmarks'),
821 821 self.sjoin('journal.phaseroots'))
822 822
823 823 def undofiles(self):
824 824 return [undoname(x) for x in self._journalfiles()]
825 825
    def _writejournal(self, desc):
        """Snapshot state files so an interrupted transaction can be undone."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
837 837
    def recover(self):
        """Roll back an interrupted transaction.

        Returns True when a journal was found and rolled back, False
        otherwise. Takes the store lock for the duration.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
852 852
    def rollback(self, dryrun=False, force=False):
        """Undo the last committed transaction if undo data exists.

        Returns 0 on success (delegated to _rollback) or 1 when no
        rollback information is available. Takes both locks.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
865 865
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Undo the last transaction recorded in .hg/undo*.

        Returns 0 on success (or dry run). Restores bookmarks, phases
        and -- when a working-directory parent was stripped -- the
        dirstate and named branch.
        """
        ui = self.ui
        try:
            # undo.desc holds: old repo length, transaction desc, detail.
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Only restore dirstate/branch when a former parent disappeared.
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
933 933
    def invalidatecaches(self):
        '''Drop the in-memory tag cache, all branchmap caches, and the
        volatile filtered-view caches.'''

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
942 942
    def invalidatevolatilesets(self):
        '''Drop caches that depend on the set of filtered/obsolete
        revisions (filtered-rev cache and obsolescence caches).'''
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
946 946
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the dirstate's own cached file state first ...
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            # ... then drop the cached dirstate object itself
            delattr(self.unfiltered(), 'dirstate')
963 963
    def invalidate(self):
        '''Drop every repository filecache entry except the dirstate,
        then clear the in-memory caches via invalidatecaches().'''
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                # cached property was never instantiated; nothing to drop
                pass
        self.invalidatecaches()
976 976
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        '''Acquire the lock file *lockname* and return the lock object.

        With wait=False a held lock raises error.LockHeld immediately;
        otherwise we warn the user and retry with the configured
        "ui.timeout" (600 seconds by default). *releasefn*/*acquirefn*
        run on release/after acquisition; *desc* is used in messages.'''
        try:
            # first attempt with timeout 0: fail fast if already held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
991 991
992 992 def _afterlock(self, callback):
993 993 """add a callback to the current repository lock.
994 994
995 995 The callback will be executed on lock release."""
996 996 l = self._lockref and self._lockref()
997 997 if l:
998 998 l.postrelease.append(callback)
999 999 else:
1000 1000 callback()
1001 1001
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-entrant acquire: bump the existing lock's count
            l.lock()
            return l

        def unlock():
            # on release: flush pending store and phase data, then
            # refresh filecache entries so cached state matches disk
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1024 1024
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant acquire: bump the existing lock's count
            l.lock()
            return l

        def unlock():
            # on release: write the dirstate and refresh its cache entry
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1043 1043
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit
        manifest1, manifest2: manifests of the commit's parents
        linkrev: the changelog revision the new filelog entry links to
        tr: the active transaction
        changelist: fname is appended to it when the file really changed

        Returns the new file node, or the first parent's file node when
        only flags changed (or nothing changed at all).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1123 1123
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # status tuple indices used below:
            # 0 modified, 1 added, 2 removed, 3 deleted, 6 clean
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed (and no branch close/change requested):
            # bail out without creating a changeset
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # fire the commit hook only after the wlock is released
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1294 1294
1295 1295 @unfilteredmethod
1296 1296 def commitctx(self, ctx, error=False):
1297 1297 """Add a new revision to current repository.
1298 1298 Revision information is passed via the context argument.
1299 1299 """
1300 1300
1301 1301 tr = lock = None
1302 1302 removed = list(ctx.removed())
1303 1303 p1, p2 = ctx.p1(), ctx.p2()
1304 1304 user = ctx.user()
1305 1305
1306 1306 lock = self.lock()
1307 1307 try:
1308 1308 tr = self.transaction("commit")
1309 1309 trp = weakref.proxy(tr)
1310 1310
1311 1311 if ctx.files():
1312 1312 m1 = p1.manifest().copy()
1313 1313 m2 = p2.manifest()
1314 1314
1315 1315 # check in files
1316 1316 new = {}
1317 1317 changed = []
1318 1318 linkrev = len(self)
1319 1319 for f in sorted(ctx.modified() + ctx.added()):
1320 1320 self.ui.note(f + "\n")
1321 1321 try:
1322 1322 fctx = ctx[f]
1323 1323 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1324 1324 changed)
1325 1325 m1.set(f, fctx.flags())
1326 1326 except OSError, inst:
1327 1327 self.ui.warn(_("trouble committing %s!\n") % f)
1328 1328 raise
1329 1329 except IOError, inst:
1330 1330 errcode = getattr(inst, 'errno', errno.ENOENT)
1331 1331 if error or errcode and errcode != errno.ENOENT:
1332 1332 self.ui.warn(_("trouble committing %s!\n") % f)
1333 1333 raise
1334 1334 else:
1335 1335 removed.append(f)
1336 1336
1337 1337 # update manifest
1338 1338 m1.update(new)
1339 1339 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1340 1340 drop = [f for f in removed if f in m1]
1341 1341 for f in drop:
1342 1342 del m1[f]
1343 1343 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1344 1344 p2.manifestnode(), (new, drop))
1345 1345 files = changed + removed
1346 1346 else:
1347 1347 mn = p1.manifestnode()
1348 1348 files = []
1349 1349
1350 1350 # update changelog
1351 1351 self.changelog.delayupdate()
1352 1352 n = self.changelog.add(mn, files, ctx.description(),
1353 1353 trp, p1.node(), p2.node(),
1354 1354 user, ctx.date(), ctx.extra().copy())
1355 1355 p = lambda: self.changelog.writepending() and self.root or ""
1356 1356 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1357 1357 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1358 1358 parent2=xp2, pending=p)
1359 1359 self.changelog.finalize(trp)
1360 1360 # set the new commit is proper phase
1361 1361 targetphase = phases.newcommitphase(self.ui)
1362 1362 if targetphase:
1363 1363 # retract boundary do not alter parent changeset.
1364 1364 # if a parent have higher the resulting phase will
1365 1365 # be compliant anyway
1366 1366 #
1367 1367 # if minimal phase was 0 we don't need to retract anything
1368 1368 phases.retractboundary(self, targetphase, [n])
1369 1369 tr.close()
1370 branchmap.updatecache(self)
1370 branchmap.updatecache(self.filtered('served'))
1371 1371 return n
1372 1372 finally:
1373 1373 if tr:
1374 1374 tr.release()
1375 1375 lock.release()
1376 1376
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # It simplifies the logic around updating the branchheads cache if we
        # only have to consider the effect of the stripped revisions and not
        # revisions missing because the cache is out-of-date.
        # (i.e. bring the cache fully up to date before revisions go away)
        branchmap.updatecache(self)

        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()
1399 1399
    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was uptodate before nodes were removed
        and you also know the set of candidate new heads that may have resulted
        from the destruction, you can set newheadnodes. This will enable the
        code to update the branchheads cache, rather than having future code
        decide it's invalid and regenerating it from scratch.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        if '_phasecache' in self._filecache:
            self._phasecache.filterunknown(self)
            self._phasecache.write()

        # If we have info, newheadnodes, on how to update the branch cache, do
        # it, Otherwise, since nodes were destroyed, the cache is stale and this
        # will be caught the next time it is read.
        if newheadnodes:
            cl = self.changelog
            # only consider candidate heads that survived the destruction
            revgen = (cl.rev(node) for node in newheadnodes
                      if cl.hasnode(node))
            cache = self._branchcaches[None]
            cache.update(self, revgen)
            cache.write(self)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1446 1446
1447 1447 def walk(self, match, node=None):
1448 1448 '''
1449 1449 walk recursively through the directory tree or a given
1450 1450 changeset, finding all files matched by the match
1451 1451 function
1452 1452 '''
1453 1453 return self[node].walk(match)
1454 1454
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored, clean and unknown lists are only populated when the
        corresponding keyword argument is true.
        """

        def mfmatches(ctx):
            # restrict a copy of ctx's manifest to the files 'match' selects
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk the second manifest, classifying each file against mf1
            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1607 1607
1608 1608 def heads(self, start=None):
1609 1609 heads = self.changelog.heads(start)
1610 1610 # sort the output in rev descending order
1611 1611 return sorted(heads, key=self.changelog.rev, reverse=True)
1612 1612
1613 1613 def branchheads(self, branch=None, start=None, closed=False):
1614 1614 '''return a (possibly filtered) list of heads for the given branch
1615 1615
1616 1616 Heads are returned in topological order, from newest to oldest.
1617 1617 If branch is None, use the dirstate branch.
1618 1618 If start is not None, return only heads reachable from start.
1619 1619 If closed is True, return heads that are marked as closed as well.
1620 1620 '''
1621 1621 if branch is None:
1622 1622 branch = self[None].branch()
1623 1623 branches = self.branchmap()
1624 1624 if branch not in branches:
1625 1625 return []
1626 1626 # the cache returns heads ordered lowest to highest
1627 1627 bheads = list(reversed(branches[branch]))
1628 1628 if start is not None:
1629 1629 # filter out the heads that cannot be reached from startrev
1630 1630 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1631 1631 bheads = [h for h in bheads if h in fbheads]
1632 1632 if not closed:
1633 1633 bheads = [h for h in bheads if not self[h].closesbranch()]
1634 1634 return bheads
1635 1635
1636 1636 def branches(self, nodes):
1637 1637 if not nodes:
1638 1638 nodes = [self.changelog.tip()]
1639 1639 b = []
1640 1640 for n in nodes:
1641 1641 t = n
1642 1642 while True:
1643 1643 p = self.changelog.parents(n)
1644 1644 if p[1] != nullid or p[0] == nullid:
1645 1645 b.append((t, n, p[0], p[1]))
1646 1646 break
1647 1647 n = p[0]
1648 1648 return b
1649 1649
1650 1650 def between(self, pairs):
1651 1651 r = []
1652 1652
1653 1653 for top, bottom in pairs:
1654 1654 n, l, i = top, [], 0
1655 1655 f = 1
1656 1656
1657 1657 while n != bottom and n != nullid:
1658 1658 p = self.changelog.parents(n)[0]
1659 1659 if i == f:
1660 1660 l.append(n)
1661 1661 f = f * 2
1662 1662 n = p
1663 1663 i += 1
1664 1664
1665 1665 r.append(l)
1666 1666
1667 1667 return r
1668 1668
    def pull(self, remote, heads=None, force=False):
        """Pull changes from *remote* into this repository.

        heads restricts the pull to the given remote heads; force allows
        pulling even unrelated changes. Returns the value produced by
        addchangegroup() (0 when no changes were found)."""
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the changegroup protocol the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                           "other repository doesn't support "
                                           "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
1750 1750
1751 1751 def checkpush(self, force, revs):
1752 1752 """Extensions can override this function if additional checks have
1753 1753 to be performed before pushing, or call it if they override push
1754 1754 command.
1755 1755 """
1756 1756 pass
1757 1757
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()

        Besides sending the changegroup, this also synchronizes phases,
        obsolescence markers and bookmarks with the remote.
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # this message are here for 80 char limit reason
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If we are to push if there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missinghead will be obsolete or
                            # unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeed, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All out push fails. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                            % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        # bookmark synchronization happens outside the locks: it only uses
        # the pushkey protocol against the remote
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1946 1946
1947 1947 def changegroupinfo(self, nodes, source):
1948 1948 if self.ui.verbose or source == 'bundle':
1949 1949 self.ui.status(_("%d changesets found\n") % len(nodes))
1950 1950 if self.ui.debugflag:
1951 1951 self.ui.debug("list of changesets:\n")
1952 1952 for node in nodes:
1953 1953 self.ui.debug("%s\n" % hex(node))
1954 1954
    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            # no bases given: start from the null revision (all of history)
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)
1975 1975
1976 1976 def getlocalbundle(self, source, outgoing):
1977 1977 """Like getbundle, but taking a discovery.outgoing as an argument.
1978 1978
1979 1979 This is only implemented for local repos and reuses potentially
1980 1980 precomputed sets in outgoing."""
1981 1981 if not outgoing.missing:
1982 1982 return None
1983 1983 return self._changegroupsubset(outgoing.common,
1984 1984 outgoing.missing,
1985 1985 outgoing.missingheads,
1986 1986 source)
1987 1987
1988 1988 def getbundle(self, source, heads=None, common=None):
1989 1989 """Like changegroupsubset, but returns the set difference between the
1990 1990 ancestors of heads and the ancestors common.
1991 1991
1992 1992 If heads is None, use the local heads. If common is None, use [nullid].
1993 1993
1994 1994 The nodes in common might not all be known locally due to the way the
1995 1995 current discovery protocol works.
1996 1996 """
1997 1997 cl = self.changelog
1998 1998 if common:
1999 1999 hasnode = cl.hasnode
2000 2000 common = [n for n in common if hasnode(n)]
2001 2001 else:
2002 2002 common = [nullid]
2003 2003 if not heads:
2004 2004 heads = cl.heads()
2005 2005 return self.getlocalbundle(source,
2006 2006 discovery.outgoing(cl, common, heads))
2007 2007
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup stream for the changesets *csets*.

        *commonrevs* is the set of revision numbers the receiver is assumed
        to already have; manifest and file nodes whose linkrev falls in
        that set are pruned. Returns an unbundle10 reader over the
        generated chunks.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        # called back by the revlog group generators; records which
        # manifests/filenodes are needed and reports progress
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2115 2115
    def changegroup(self, basenodes, source):
        """Return a changegroup of everything between basenodes and our
        current heads."""
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
2119 2119
    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        # all nodes of a revlog whose linkrev points into the set we send
        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        # called back by the revlog group generators; collects changed
        # files and maps each node to its owning changelog node
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2213 2213
    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything still listed in needfiles was announced by a manifest
            # but never delivered: the incoming data is incomplete
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old server can not push the boundary themself.
                # New server won't push the boundary if changeset already
                # existed locally as secrete
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but coming call to
                    # `destroyed` will repair it.
                    # In other case we can safely update cache on disk.
                    branchmap.updatecache(self.filtered('served'))
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2410 2410
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from *remote*.

        *requirements* (a set) is extended with this repository's existing
        non-format requirements and then applied and written out. Returns
        len(self.heads()) + 1 (a head-count style result, like pull).
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            # seed the branch cache from the branchmap the remote gave us,
            # so the first local operation does not have to recompute it
            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                  for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    self._branchcaches[None] = cache
                    cache.write(self.unfiltered())
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2497 2497
2498 2498 def clone(self, remote, heads=[], stream=False):
2499 2499 '''clone remote repository.
2500 2500
2501 2501 keyword arguments:
2502 2502 heads: list of revs to clone (forces use of pull)
2503 2503 stream: use streaming clone if possible'''
2504 2504
2505 2505 # now, all clients that can request uncompressed clones can
2506 2506 # read repo formats supported by all servers that can serve
2507 2507 # them.
2508 2508
2509 2509 # if revlog format changes, client will have to check version
2510 2510 # and format flags on "stream" capability, and use
2511 2511 # uncompressed only if compatible.
2512 2512
2513 2513 if not stream:
2514 2514 # if the server explicitly prefers to stream (for fast LANs)
2515 2515 stream = remote.capable('stream-preferred')
2516 2516
2517 2517 if stream and not heads:
2518 2518 # 'stream' means remote revlog format is revlogv1 only
2519 2519 if remote.capable('stream'):
2520 2520 return self.stream_in(remote, set(('revlogv1',)))
2521 2521 # otherwise, 'streamreqs' contains the remote revlog format
2522 2522 streamreqs = remote.capable('streamreqs')
2523 2523 if streamreqs:
2524 2524 streamreqs = set(streamreqs.split(','))
2525 2525 # if we support it, stream in and adjust our requirements
2526 2526 if not streamreqs - self.supportedformats:
2527 2527 return self.stream_in(remote, streamreqs)
2528 2528 return self.pull(remote, heads)
2529 2529
    def pushkey(self, namespace, key, old, new):
        """Update *key* in *namespace* from *old* to *new* via the pushkey
        mechanism, firing the 'prepushkey' hook (which may veto by raising)
        before the change and 'pushkey' after it. Returns the push result."""
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
2538 2538
    def listkeys(self, namespace):
        """Return the pushkey mapping for *namespace*, firing the
        'prelistkeys' hook (which may veto by raising) before the lookup
        and 'listkeys' with the resulting values after it."""
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
2545 2545
2546 2546 def debugwireargs(self, one, two, three=None, four=None, five=None):
2547 2547 '''used to test argument passing over the wire'''
2548 2548 return "%s %s %s %s %s" % (one, two, three, four, five)
2549 2549
    def savecommitmessage(self, text):
        """Write *text* to the repository's last-message.txt file and
        return that file's path relative to the repository root."""
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        # fp.name is absolute; strip '<root>/' to get a repo-relative path
        return self.pathto(fp.name[len(self.root) + 1:])
2557 2557
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a closure performing the given (src, dest) renames.

    The rename list is copied eagerly so the returned closure holds no
    reference back to the caller's data structures.
    """
    pending = [tuple(pair) for pair in files]
    def a():
        for src, dest in pending:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
2568 2568
def undoname(fn):
    """Return the 'undo' counterpart of a journal file path.

    The basename must start with 'journal'; only that first occurrence
    is rewritten to 'undo'.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(directory, basename.replace('journal', 'undo', 1))
2573 2573
def instance(ui, path, create):
    """Open (or create) the local repository designated by *path*."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2576 2576
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,1155 +1,1153 b''
1 1 $ cat <<EOF >> $HGRCPATH
2 2 > [extensions]
3 3 > keyword =
4 4 > mq =
5 5 > notify =
6 6 > record =
7 7 > transplant =
8 8 > [ui]
9 9 > interactive = true
10 10 > EOF
11 11
12 12 hide outer repo
13 13 $ hg init
14 14
15 15 Run kwdemo before [keyword] files are set up
16 16 as it would succeed without uisetup otherwise
17 17
18 18 $ hg --quiet kwdemo
19 19 [extensions]
20 20 keyword =
21 21 [keyword]
22 22 demo.txt =
23 23 [keywordset]
24 24 svn = False
25 25 [keywordmaps]
26 26 Author = {author|user}
27 27 Date = {date|utcdate}
28 28 Header = {root}/{file},v {node|short} {date|utcdate} {author|user}
29 29 Id = {file|basename},v {node|short} {date|utcdate} {author|user}
30 30 RCSFile = {file|basename},v
31 31 RCSfile = {file|basename},v
32 32 Revision = {node|short}
33 33 Source = {root}/{file},v
34 34 $Author: test $
35 35 $Date: ????/??/?? ??:??:?? $ (glob)
36 36 $Header: */demo.txt,v ???????????? ????/??/?? ??:??:?? test $ (glob)
37 37 $Id: demo.txt,v ???????????? ????/??/?? ??:??:?? test $ (glob)
38 38 $RCSFile: demo.txt,v $
39 39 $RCSfile: demo.txt,v $
40 40 $Revision: ???????????? $ (glob)
41 41 $Source: */demo.txt,v $ (glob)
42 42
43 43 $ hg --quiet kwdemo "Branch = {branches}"
44 44 [extensions]
45 45 keyword =
46 46 [keyword]
47 47 demo.txt =
48 48 [keywordset]
49 49 svn = False
50 50 [keywordmaps]
51 51 Branch = {branches}
52 52 $Branch: demobranch $
53 53
54 54 $ cat <<EOF >> $HGRCPATH
55 55 > [keyword]
56 56 > ** =
57 57 > b = ignore
58 58 > i = ignore
59 59 > [hooks]
60 60 > EOF
61 61 $ cp $HGRCPATH $HGRCPATH.nohooks
62 62 > cat <<EOF >> $HGRCPATH
63 63 > commit=
64 64 > commit.test=cp a hooktest
65 65 > EOF
66 66
67 67 $ hg init Test-bndl
68 68 $ cd Test-bndl
69 69
70 70 kwshrink should exit silently in empty/invalid repo
71 71
72 72 $ hg kwshrink
73 73
74 74 Symlinks cannot be created on Windows.
75 75 A bundle to test this was made with:
76 76 hg init t
77 77 cd t
78 78 echo a > a
79 79 ln -s a sym
80 80 hg add sym
81 81 hg ci -m addsym -u mercurial
82 82 hg bundle --base null ../test-keyword.hg
83 83
84 84 $ hg pull -u "$TESTDIR"/bundles/test-keyword.hg
85 85 pulling from *test-keyword.hg (glob)
86 86 requesting all changes
87 87 adding changesets
88 88 adding manifests
89 89 adding file changes
90 90 added 1 changesets with 1 changes to 1 files
91 91 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 92
93 93 $ echo 'expand $Id$' > a
94 94 $ echo 'do not process $Id:' >> a
95 95 $ echo 'xxx $' >> a
96 96 $ echo 'ignore $Id$' > b
97 97
98 98 Output files as they were created
99 99
100 100 $ cat a b
101 101 expand $Id$
102 102 do not process $Id:
103 103 xxx $
104 104 ignore $Id$
105 105
106 106 no kwfiles
107 107
108 108 $ hg kwfiles
109 109
110 110 untracked candidates
111 111
112 112 $ hg -v kwfiles --unknown
113 113 k a
114 114
115 115 Add files and check status
116 116
117 117 $ hg addremove
118 118 adding a
119 119 adding b
120 120 $ hg status
121 121 A a
122 122 A b
123 123
124 124
125 125 Default keyword expansion including commit hook
126 126 Interrupted commit should not change state or run commit hook
127 127
128 128 $ hg --debug commit
129 129 abort: empty commit message
130 130 [255]
131 131 $ hg status
132 132 A a
133 133 A b
134 134
135 135 Commit with several checks
136 136
137 137 $ hg --debug commit -mabsym -u 'User Name <user@example.com>'
138 138 a
139 139 b
140 140 overwriting a expanding keywords
141 141 running hook commit.test: cp a hooktest
142 142 committed changeset 1:ef63ca68695bc9495032c6fda1350c71e6d256e9
143 143 $ hg status
144 144 ? hooktest
145 145 $ hg debugrebuildstate
146 146 $ hg --quiet identify
147 147 ef63ca68695b
148 148
149 149 cat files in working directory with keywords expanded
150 150
151 151 $ cat a b
152 152 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
153 153 do not process $Id:
154 154 xxx $
155 155 ignore $Id$
156 156
157 157 hg cat files and symlink, no expansion
158 158
159 159 $ hg cat sym a b && echo
160 160 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
161 161 do not process $Id:
162 162 xxx $
163 163 ignore $Id$
164 164 a
165 165
166 166 $ diff a hooktest
167 167
168 168 $ cp $HGRCPATH.nohooks $HGRCPATH
169 169 $ rm hooktest
170 170
171 171 hg status of kw-ignored binary file starting with '\1\n'
172 172
173 173 >>> open("i", "wb").write("\1\nfoo")
174 174 $ hg -q commit -Am metasep i
175 175 $ hg status
176 176 >>> open("i", "wb").write("\1\nbar")
177 177 $ hg status
178 178 M i
179 179 $ hg -q commit -m "modify metasep" i
180 180 $ hg status --rev 2:3
181 181 M i
182 182 $ touch empty
183 183 $ hg -q commit -A -m "another file"
184 184 $ hg status -A --rev 3:4 i
185 185 C i
186 186
187 187 $ hg -q strip -n 2
188 188
189 189 Test hook execution
190 190
191 191 bundle
192 192
193 193 $ hg bundle --base null ../kw.hg
194 194 2 changesets found
195 195 $ cd ..
196 196 $ hg init Test
197 197 $ cd Test
198 198
199 199 Notify on pull to check whether keywords stay as is in email
200 200 ie. if patch.diff wrapper acts as it should
201 201
202 202 $ cat <<EOF >> $HGRCPATH
203 203 > [hooks]
204 204 > incoming.notify = python:hgext.notify.hook
205 205 > [notify]
206 206 > sources = pull
207 207 > diffstat = False
208 208 > maxsubject = 15
209 209 > [reposubs]
210 210 > * = Test
211 211 > EOF
212 212
213 213 Pull from bundle and trigger notify
214 214
215 215 $ hg pull -u ../kw.hg
216 216 pulling from ../kw.hg
217 217 requesting all changes
218 218 adding changesets
219 219 adding manifests
220 220 adding file changes
221 221 added 2 changesets with 3 changes to 3 files
222 222 Content-Type: text/plain; charset="us-ascii"
223 223 MIME-Version: 1.0
224 224 Content-Transfer-Encoding: 7bit
225 225 Date: * (glob)
226 226 Subject: changeset in...
227 227 From: mercurial
228 228 X-Hg-Notification: changeset a2392c293916
229 229 Message-Id: <hg.a2392c293916*> (glob)
230 230 To: Test
231 231
232 232 changeset a2392c293916 in $TESTTMP/Test (glob)
233 233 details: $TESTTMP/Test?cmd=changeset;node=a2392c293916
234 234 description:
235 235 addsym
236 236
237 237 diffs (6 lines):
238 238
239 239 diff -r 000000000000 -r a2392c293916 sym
240 240 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
241 241 +++ b/sym Sat Feb 09 20:25:47 2008 +0100
242 242 @@ -0,0 +1,1 @@
243 243 +a
244 244 \ No newline at end of file
245 245 Content-Type: text/plain; charset="us-ascii"
246 246 MIME-Version: 1.0
247 247 Content-Transfer-Encoding: 7bit
248 248 Date:* (glob)
249 249 Subject: changeset in...
250 250 From: User Name <user@example.com>
251 251 X-Hg-Notification: changeset ef63ca68695b
252 252 Message-Id: <hg.ef63ca68695b*> (glob)
253 253 To: Test
254 254
255 255 changeset ef63ca68695b in $TESTTMP/Test (glob)
256 256 details: $TESTTMP/Test?cmd=changeset;node=ef63ca68695b
257 257 description:
258 258 absym
259 259
260 260 diffs (12 lines):
261 261
262 262 diff -r a2392c293916 -r ef63ca68695b a
263 263 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
264 264 +++ b/a Thu Jan 01 00:00:00 1970 +0000
265 265 @@ -0,0 +1,3 @@
266 266 +expand $Id$
267 267 +do not process $Id:
268 268 +xxx $
269 269 diff -r a2392c293916 -r ef63ca68695b b
270 270 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
271 271 +++ b/b Thu Jan 01 00:00:00 1970 +0000
272 272 @@ -0,0 +1,1 @@
273 273 +ignore $Id$
274 274 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
275 275
276 276 $ cp $HGRCPATH.nohooks $HGRCPATH
277 277
278 278 Touch files and check with status
279 279
280 280 $ touch a b
281 281 $ hg status
282 282
283 283 Update and expand
284 284
285 285 $ rm sym a b
286 286 $ hg update -C
287 287 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
288 288 $ cat a b
289 289 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
290 290 do not process $Id:
291 291 xxx $
292 292 ignore $Id$
293 293
294 294 Check whether expansion is filewise and file mode is preserved
295 295
296 296 $ echo '$Id$' > c
297 297 $ echo 'tests for different changenodes' >> c
298 298 #if unix-permissions
299 299 $ chmod 600 c
300 300 $ ls -l c | cut -b 1-10
301 301 -rw-------
302 302 #endif
303 303
304 304 commit file c
305 305
306 306 $ hg commit -A -mcndiff -d '1 0' -u 'User Name <user@example.com>'
307 307 adding c
308 308 #if unix-permissions
309 309 $ ls -l c | cut -b 1-10
310 310 -rw-------
311 311 #endif
312 312
313 313 force expansion
314 314
315 315 $ hg -v kwexpand
316 316 overwriting a expanding keywords
317 317 overwriting c expanding keywords
318 318
319 319 compare changenodes in a and c
320 320
321 321 $ cat a c
322 322 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
323 323 do not process $Id:
324 324 xxx $
325 325 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
326 326 tests for different changenodes
327 327
328 328 record
329 329
330 330 $ echo '$Id$' > r
331 331 $ hg add r
332 332
333 333 record chunk
334 334
335 335 >>> lines = open('a', 'rb').readlines()
336 336 >>> lines.insert(1, 'foo\n')
337 337 >>> lines.append('bar\n')
338 338 >>> open('a', 'wb').writelines(lines)
339 339 $ hg record -d '10 1' -m rectest a<<EOF
340 340 > y
341 341 > y
342 342 > n
343 343 > EOF
344 344 diff --git a/a b/a
345 345 2 hunks, 2 lines changed
346 346 examine changes to 'a'? [Ynesfdaq?]
347 347 @@ -1,3 +1,4 @@
348 348 expand $Id$
349 349 +foo
350 350 do not process $Id:
351 351 xxx $
352 352 record change 1/2 to 'a'? [Ynesfdaq?]
353 353 @@ -2,2 +3,3 @@
354 354 do not process $Id:
355 355 xxx $
356 356 +bar
357 357 record change 2/2 to 'a'? [Ynesfdaq?]
358 358
359 359 $ hg identify
360 360 5f5eb23505c3+ tip
361 361 $ hg status
362 362 M a
363 363 A r
364 364
365 365 Cat modified file a
366 366
367 367 $ cat a
368 368 expand $Id: a,v 5f5eb23505c3 1970/01/01 00:00:10 test $
369 369 foo
370 370 do not process $Id:
371 371 xxx $
372 372 bar
373 373
374 374 Diff remaining chunk
375 375
376 376 $ hg diff a
377 377 diff -r 5f5eb23505c3 a
378 378 --- a/a Thu Jan 01 00:00:09 1970 -0000
379 379 +++ b/a * (glob)
380 380 @@ -2,3 +2,4 @@
381 381 foo
382 382 do not process $Id:
383 383 xxx $
384 384 +bar
385 385
386 386 $ hg rollback
387 387 repository tip rolled back to revision 2 (undo commit)
388 388 working directory now based on revision 2
389 389
390 390 Record all chunks in file a
391 391
392 392 $ echo foo > msg
393 393
394 394 - do not use "hg record -m" here!
395 395
396 396 $ hg record -l msg -d '11 1' a<<EOF
397 397 > y
398 398 > y
399 399 > y
400 400 > EOF
401 401 diff --git a/a b/a
402 402 2 hunks, 2 lines changed
403 403 examine changes to 'a'? [Ynesfdaq?]
404 404 @@ -1,3 +1,4 @@
405 405 expand $Id$
406 406 +foo
407 407 do not process $Id:
408 408 xxx $
409 409 record change 1/2 to 'a'? [Ynesfdaq?]
410 410 @@ -2,2 +3,3 @@
411 411 do not process $Id:
412 412 xxx $
413 413 +bar
414 414 record change 2/2 to 'a'? [Ynesfdaq?]
415 415
416 416 File a should be clean
417 417
418 418 $ hg status -A a
419 419 C a
420 420
421 421 rollback and revert expansion
422 422
423 423 $ cat a
424 424 expand $Id: a,v 78e0a02d76aa 1970/01/01 00:00:11 test $
425 425 foo
426 426 do not process $Id:
427 427 xxx $
428 428 bar
429 429 $ hg --verbose rollback
430 430 repository tip rolled back to revision 2 (undo commit)
431 431 working directory now based on revision 2
432 432 overwriting a expanding keywords
433 433 $ hg status a
434 434 M a
435 435 $ cat a
436 436 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
437 437 foo
438 438 do not process $Id:
439 439 xxx $
440 440 bar
441 441 $ echo '$Id$' > y
442 442 $ echo '$Id$' > z
443 443 $ hg add y
444 444 $ hg commit -Am "rollback only" z
445 445 $ cat z
446 446 $Id: z,v 45a5d3adce53 1970/01/01 00:00:00 test $
447 447 $ hg --verbose rollback
448 448 repository tip rolled back to revision 2 (undo commit)
449 449 working directory now based on revision 2
450 450 overwriting z shrinking keywords
451 451
452 452 Only z should be overwritten
453 453
454 454 $ hg status a y z
455 455 M a
456 456 A y
457 457 A z
458 458 $ cat z
459 459 $Id$
460 460 $ hg forget y z
461 461 $ rm y z
462 462
463 463 record added file alone
464 464
465 465 $ hg -v record -l msg -d '12 2' r<<EOF
466 466 > y
467 467 > EOF
468 468 diff --git a/r b/r
469 469 new file mode 100644
470 470 examine changes to 'r'? [Ynesfdaq?]
471 471 r
472 472 committed changeset 3:82a2f715724d
473 473 overwriting r expanding keywords
474 474 - status call required for dirstate.normallookup() check
475 475 $ hg status r
476 476 $ hg --verbose rollback
477 477 repository tip rolled back to revision 2 (undo commit)
478 478 working directory now based on revision 2
479 479 overwriting r shrinking keywords
480 480 $ hg forget r
481 481 $ rm msg r
482 482 $ hg update -C
483 483 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
484 484
485 485 record added keyword ignored file
486 486
487 487 $ echo '$Id$' > i
488 488 $ hg add i
489 489 $ hg --verbose record -d '13 1' -m recignored<<EOF
490 490 > y
491 491 > EOF
492 492 diff --git a/i b/i
493 493 new file mode 100644
494 494 examine changes to 'i'? [Ynesfdaq?]
495 495 i
496 496 committed changeset 3:9f40ceb5a072
497 497 $ cat i
498 498 $Id$
499 499 $ hg -q rollback
500 500 $ hg forget i
501 501 $ rm i
502 502
503 503 amend
504 504
505 505 $ echo amend >> a
506 506 $ echo amend >> b
507 507 $ hg -q commit -d '14 1' -m 'prepare amend'
508 508
509 509 $ hg --debug commit --amend -d '15 1' -m 'amend without changes' | grep keywords
510 510 overwriting a expanding keywords
511 511 $ hg -q id
512 512 67d8c481a6be
513 513 $ head -1 a
514 514 expand $Id: a,v 67d8c481a6be 1970/01/01 00:00:15 test $
515 515
516 516 $ hg -q strip -n tip
517 517
518 518 Test patch queue repo
519 519
520 520 $ hg init --mq
521 521 $ hg qimport -r tip -n mqtest.diff
522 522 $ hg commit --mq -m mqtest
523 523
524 524 Keywords should not be expanded in patch
525 525
526 526 $ cat .hg/patches/mqtest.diff
527 527 # HG changeset patch
528 528 # User User Name <user@example.com>
529 529 # Date 1 0
530 530 # Node ID 40a904bbbe4cd4ab0a1f28411e35db26341a40ad
531 531 # Parent ef63ca68695bc9495032c6fda1350c71e6d256e9
532 532 cndiff
533 533
534 534 diff -r ef63ca68695b -r 40a904bbbe4c c
535 535 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
536 536 +++ b/c Thu Jan 01 00:00:01 1970 +0000
537 537 @@ -0,0 +1,2 @@
538 538 +$Id$
539 539 +tests for different changenodes
540 540
541 541 $ hg qpop
542 542 popping mqtest.diff
543 543 patch queue now empty
544 544
545 545 qgoto, implying qpush, should expand
546 546
547 547 $ hg qgoto mqtest.diff
548 548 applying mqtest.diff
549 549 now at: mqtest.diff
550 550 $ cat c
551 551 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
552 552 tests for different changenodes
553 553 $ hg cat c
554 554 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
555 555 tests for different changenodes
556 556
557 557 Keywords should not be expanded in filelog
558 558
559 559 $ hg --config 'extensions.keyword=!' cat c
560 560 $Id$
561 561 tests for different changenodes
562 562
563 563 qpop and move on
564 564
565 565 $ hg qpop
566 566 popping mqtest.diff
567 567 patch queue now empty
568 568
569 569 Copy and show added kwfiles
570 570
571 571 $ hg cp a c
572 572 $ hg kwfiles
573 573 a
574 574 c
575 575
576 576 Commit and show expansion in original and copy
577 577
578 578 $ hg --debug commit -ma2c -d '1 0' -u 'User Name <user@example.com>'
579 579 invalid branchheads cache (served): tip differs
580 580 c
581 581 c: copy a:0045e12f6c5791aac80ca6cbfd97709a88307292
582 invalid branchheads cache (served): tip differs
582 583 overwriting c expanding keywords
583 584 committed changeset 2:25736cf2f5cbe41f6be4e6784ef6ecf9f3bbcc7d
584 585 $ cat a c
585 586 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
586 587 do not process $Id:
587 588 xxx $
588 589 expand $Id: c,v 25736cf2f5cb 1970/01/01 00:00:01 user $
589 590 do not process $Id:
590 591 xxx $
591 592
592 593 Touch copied c and check its status
593 594
594 595 $ touch c
595 596 $ hg status
596 597
597 598 Copy kwfile to keyword ignored file unexpanding keywords
598 599
599 600 $ hg --verbose copy a i
600 601 copying a to i
601 602 overwriting i shrinking keywords
602 603 $ head -n 1 i
603 604 expand $Id$
604 605 $ hg forget i
605 606 $ rm i
606 607
607 608 Copy ignored file to ignored file: no overwriting
608 609
609 610 $ hg --verbose copy b i
610 611 copying b to i
611 612 $ hg forget i
612 613 $ rm i
613 614
614 615 cp symlink file; hg cp -A symlink file (part1)
615 616 - copied symlink points to kwfile: overwrite
616 617
617 618 #if symlink
618 619 $ cp sym i
619 620 $ ls -l i
620 621 -rw-r--r--* (glob)
621 622 $ head -1 i
622 623 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
623 624 $ hg copy --after --verbose sym i
624 625 copying sym to i
625 626 overwriting i shrinking keywords
626 627 $ head -1 i
627 628 expand $Id$
628 629 $ hg forget i
629 630 $ rm i
630 631 #endif
631 632
632 633 Test different options of hg kwfiles
633 634
634 635 $ hg kwfiles
635 636 a
636 637 c
637 638 $ hg -v kwfiles --ignore
638 639 I b
639 640 I sym
640 641 $ hg kwfiles --all
641 642 K a
642 643 K c
643 644 I b
644 645 I sym
645 646
646 647 Diff specific revision
647 648
648 649 $ hg diff --rev 1
649 650 diff -r ef63ca68695b c
650 651 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
651 652 +++ b/c * (glob)
652 653 @@ -0,0 +1,3 @@
653 654 +expand $Id$
654 655 +do not process $Id:
655 656 +xxx $
656 657
657 658 Status after rollback:
658 659
659 660 $ hg rollback
660 661 repository tip rolled back to revision 1 (undo commit)
661 662 working directory now based on revision 1
662 663 $ hg status
663 664 A c
664 665 $ hg update --clean
665 666 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
666 667
667 668 #if symlink
668 669
669 670 cp symlink file; hg cp -A symlink file (part2)
670 671 - copied symlink points to kw ignored file: do not overwrite
671 672
672 673 $ cat a > i
673 674 $ ln -s i symignored
674 675 $ hg commit -Am 'fake expansion in ignored and symlink' i symignored
675 676 $ cp symignored x
676 677 $ hg copy --after --verbose symignored x
677 678 copying symignored to x
678 679 $ head -n 1 x
679 680 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
680 681 $ hg forget x
681 682 $ rm x
682 683
683 684 $ hg rollback
684 685 repository tip rolled back to revision 1 (undo commit)
685 686 working directory now based on revision 1
686 687 $ hg update --clean
687 688 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
688 689 $ rm i symignored
689 690
690 691 #endif
691 692
692 693 Custom keywordmaps as argument to kwdemo
693 694
694 695 $ hg --quiet kwdemo "Xinfo = {author}: {desc}"
695 696 [extensions]
696 697 keyword =
697 698 [keyword]
698 699 ** =
699 700 b = ignore
700 701 demo.txt =
701 702 i = ignore
702 703 [keywordset]
703 704 svn = False
704 705 [keywordmaps]
705 706 Xinfo = {author}: {desc}
706 707 $Xinfo: test: hg keyword configuration and expansion example $
707 708
708 709 Configure custom keywordmaps
709 710
710 711 $ cat <<EOF >>$HGRCPATH
711 712 > [keywordmaps]
712 713 > Id = {file} {node|short} {date|rfc822date} {author|user}
713 714 > Xinfo = {author}: {desc}
714 715 > EOF
715 716
716 717 Cat and hg cat files before custom expansion
717 718
718 719 $ cat a b
719 720 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
720 721 do not process $Id:
721 722 xxx $
722 723 ignore $Id$
723 724 $ hg cat sym a b && echo
724 725 expand $Id: a ef63ca68695b Thu, 01 Jan 1970 00:00:00 +0000 user $
725 726 do not process $Id:
726 727 xxx $
727 728 ignore $Id$
728 729 a
729 730
730 731 Write custom keyword and prepare multi-line commit message
731 732
732 733 $ echo '$Xinfo$' >> a
733 734 $ cat <<EOF >> log
734 735 > firstline
735 736 > secondline
736 737 > EOF
737 738
738 739 Interrupted commit should not change state
739 740
740 741 $ hg commit
741 742 abort: empty commit message
742 743 [255]
743 744 $ hg status
744 745 M a
745 746 ? c
746 747 ? log
747 748
748 749 Commit with multi-line message and custom expansion
749 750
750 751 |Note:
751 752 |
752 753 | After the last rollback, the "unserved" branchheads cache became invalid, but
753 754 | all changesets in the repo were public. For filtering this means:
754 755 | "mutable" == "unserved" == ΓΈ.
755 756 |
756 757 | As the "unserved" cache is invalid, we fall back to the "mutable" cache. But
757 758 | no update is needed between "mutable" and "unserved" and the "unserved" cache
758 759 | is not updated on disk. The on-disk version therefore stays invalid for some
759 760 | time. This explains why the "unserved" branchheads cache is detected as
760 761 | invalid here.
761 762
762 763 $ hg --debug commit -l log -d '2 0' -u 'User Name <user@example.com>'
763 764 invalid branchheads cache (served): tip differs
764 765 a
765 invalid branchheads cache: tip differs
766 766 invalid branchheads cache (served): tip differs
767 767 overwriting a expanding keywords
768 768 committed changeset 2:bb948857c743469b22bbf51f7ec8112279ca5d83
769 769 $ rm log
770 770
771 771 Stat, verify and show custom expansion (firstline)
772 772
773 773 $ hg status
774 774 ? c
775 775 $ hg verify
776 776 checking changesets
777 777 checking manifests
778 778 crosschecking files in changesets and manifests
779 779 checking files
780 780 3 files, 3 changesets, 4 total revisions
781 781 $ cat a b
782 782 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
783 783 do not process $Id:
784 784 xxx $
785 785 $Xinfo: User Name <user@example.com>: firstline $
786 786 ignore $Id$
787 787 $ hg cat sym a b && echo
788 788 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
789 789 do not process $Id:
790 790 xxx $
791 791 $Xinfo: User Name <user@example.com>: firstline $
792 792 ignore $Id$
793 793 a
794 794
795 795 annotate
796 796
797 797 $ hg annotate a
798 798 1: expand $Id$
799 799 1: do not process $Id:
800 800 1: xxx $
801 801 2: $Xinfo$
802 802
803 803 remove with status checks
804 804
805 805 $ hg debugrebuildstate
806 806 $ hg remove a
807 807 $ hg --debug commit -m rma
808 invalid branchheads cache: tip differs
809 808 committed changeset 3:d14c712653769de926994cf7fbb06c8fbd68f012
810 809 $ hg status
811 810 ? c
812 811
813 812 Rollback, revert, and check expansion
814 813
815 814 $ hg rollback
816 815 repository tip rolled back to revision 2 (undo commit)
817 816 working directory now based on revision 2
818 817 $ hg status
819 818 R a
820 819 ? c
821 820 $ hg revert --no-backup --rev tip a
822 821 $ cat a
823 822 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
824 823 do not process $Id:
825 824 xxx $
826 825 $Xinfo: User Name <user@example.com>: firstline $
827 826
828 827 Clone to test global and local configurations
829 828
830 829 $ cd ..
831 830
832 831 Expansion in destination with global configuration
833 832
834 833 $ hg --quiet clone Test globalconf
835 834 $ cat globalconf/a
836 835 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
837 836 do not process $Id:
838 837 xxx $
839 838 $Xinfo: User Name <user@example.com>: firstline $
840 839
841 840 No expansion in destination with local configuration in origin only
842 841
843 842 $ hg --quiet --config 'keyword.**=ignore' clone Test localconf
844 843 $ cat localconf/a
845 844 expand $Id$
846 845 do not process $Id:
847 846 xxx $
848 847 $Xinfo$
849 848
850 849 Clone to test incoming
851 850
852 851 $ hg clone -r1 Test Test-a
853 852 adding changesets
854 853 adding manifests
855 854 adding file changes
856 855 added 2 changesets with 3 changes to 3 files
857 856 updating to branch default
858 857 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
859 858 $ cd Test-a
860 859 $ cat <<EOF >> .hg/hgrc
861 860 > [paths]
862 861 > default = ../Test
863 862 > EOF
864 863 $ hg incoming
865 864 comparing with $TESTTMP/Test (glob)
866 865 searching for changes
867 866 changeset: 2:bb948857c743
868 867 tag: tip
869 868 user: User Name <user@example.com>
870 869 date: Thu Jan 01 00:00:02 1970 +0000
871 870 summary: firstline
872 871
873 872 Imported patch should not be rejected
874 873
875 874 >>> import re
876 875 >>> text = re.sub(r'(Id.*)', r'\1 rejecttest', open('a').read())
877 876 >>> open('a', 'wb').write(text)
878 877 $ hg --debug commit -m'rejects?' -d '3 0' -u 'User Name <user@example.com>'
879 878 a
880 879 overwriting a expanding keywords
881 880 committed changeset 2:85e279d709ffc28c9fdd1b868570985fc3d87082
882 881 $ hg export -o ../rejecttest.diff tip
883 882 $ cd ../Test
884 883 $ hg import ../rejecttest.diff
885 884 applying ../rejecttest.diff
886 885 $ cat a b
887 886 expand $Id: a 4e0994474d25 Thu, 01 Jan 1970 00:00:03 +0000 user $ rejecttest
888 887 do not process $Id: rejecttest
889 888 xxx $
890 889 $Xinfo: User Name <user@example.com>: rejects? $
891 890 ignore $Id$
892 891
893 892 $ hg rollback
894 893 repository tip rolled back to revision 2 (undo import)
895 894 working directory now based on revision 2
896 895 $ hg update --clean
897 896 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
898 897
899 898 kwexpand/kwshrink on selected files
900 899
901 900 $ mkdir x
902 901 $ hg copy a x/a
903 902 $ hg --verbose kwshrink a
904 903 overwriting a shrinking keywords
905 904 - sleep required for dirstate.normal() check
906 905 $ sleep 1
907 906 $ hg status a
908 907 $ hg --verbose kwexpand a
909 908 overwriting a expanding keywords
910 909 $ hg status a
911 910
912 911 kwexpand x/a should abort
913 912
914 913 $ hg --verbose kwexpand x/a
915 914 abort: outstanding uncommitted changes
916 915 [255]
917 916 $ cd x
918 917 $ hg --debug commit -m xa -d '3 0' -u 'User Name <user@example.com>'
919 918 x/a
920 919 x/a: copy a:779c764182ce5d43e2b1eb66ce06d7b47bfe342e
921 invalid branchheads cache: tip differs
922 920 overwriting x/a expanding keywords
923 921 committed changeset 3:b4560182a3f9a358179fd2d835c15e9da379c1e4
924 922 $ cat a
925 923 expand $Id: x/a b4560182a3f9 Thu, 01 Jan 1970 00:00:03 +0000 user $
926 924 do not process $Id:
927 925 xxx $
928 926 $Xinfo: User Name <user@example.com>: xa $
929 927
930 928 kwshrink a inside directory x
931 929
932 930 $ hg --verbose kwshrink a
933 931 overwriting x/a shrinking keywords
934 932 $ cat a
935 933 expand $Id$
936 934 do not process $Id:
937 935 xxx $
938 936 $Xinfo$
939 937 $ cd ..
940 938
941 939 kwexpand nonexistent
942 940
943 941 $ hg kwexpand nonexistent
944 942 nonexistent:* (glob)
945 943
946 944
947 945 #if serve
948 946 hg serve
949 947 - expand with hgweb file
950 948 - no expansion with hgweb annotate/changeset/filediff
951 949 - check errors
952 950
953 951 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
954 952 $ cat hg.pid >> $DAEMON_PIDS
955 953 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'file/tip/a/?style=raw'
956 954 200 Script output follows
957 955
958 956 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
959 957 do not process $Id:
960 958 xxx $
961 959 $Xinfo: User Name <user@example.com>: firstline $
962 960 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'annotate/tip/a/?style=raw'
963 961 200 Script output follows
964 962
965 963
966 964 user@1: expand $Id$
967 965 user@1: do not process $Id:
968 966 user@1: xxx $
969 967 user@2: $Xinfo$
970 968
971 969
972 970
973 971
974 972 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'rev/tip/?style=raw'
975 973 200 Script output follows
976 974
977 975
978 976 # HG changeset patch
979 977 # User User Name <user@example.com>
980 978 # Date 3 0
981 979 # Node ID b4560182a3f9a358179fd2d835c15e9da379c1e4
982 980 # Parent bb948857c743469b22bbf51f7ec8112279ca5d83
983 981 xa
984 982
985 983 diff -r bb948857c743 -r b4560182a3f9 x/a
986 984 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
987 985 +++ b/x/a Thu Jan 01 00:00:03 1970 +0000
988 986 @@ -0,0 +1,4 @@
989 987 +expand $Id$
990 988 +do not process $Id:
991 989 +xxx $
992 990 +$Xinfo$
993 991
994 992 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'diff/bb948857c743/a?style=raw'
995 993 200 Script output follows
996 994
997 995
998 996 diff -r ef63ca68695b -r bb948857c743 a
999 997 --- a/a Thu Jan 01 00:00:00 1970 +0000
1000 998 +++ b/a Thu Jan 01 00:00:02 1970 +0000
1001 999 @@ -1,3 +1,4 @@
1002 1000 expand $Id$
1003 1001 do not process $Id:
1004 1002 xxx $
1005 1003 +$Xinfo$
1006 1004
1007 1005
1008 1006
1009 1007
1010 1008 $ cat errors.log
1011 1009 #endif
1012 1010
1013 1011 Prepare merge and resolve tests
1014 1012
1015 1013 $ echo '$Id$' > m
1016 1014 $ hg add m
1017 1015 $ hg commit -m 4kw
1018 1016 $ echo foo >> m
1019 1017 $ hg commit -m 5foo
1020 1018
1021 1019 simplemerge
1022 1020
1023 1021 $ hg update 4
1024 1022 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1025 1023 $ echo foo >> m
1026 1024 $ hg commit -m 6foo
1027 1025 created new head
1028 1026 $ hg merge
1029 1027 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1030 1028 (branch merge, don't forget to commit)
1031 1029 $ hg commit -m simplemerge
1032 1030 $ cat m
1033 1031 $Id: m 27d48ee14f67 Thu, 01 Jan 1970 00:00:00 +0000 test $
1034 1032 foo
1035 1033
1036 1034 conflict: keyword should stay outside conflict zone
1037 1035
1038 1036 $ hg update 4
1039 1037 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1040 1038 $ echo bar >> m
1041 1039 $ hg commit -m 8bar
1042 1040 created new head
1043 1041 $ hg merge
1044 1042 merging m
1045 1043 warning: conflicts during merge.
1046 1044 merging m incomplete! (edit conflicts, then use 'hg resolve --mark')
1047 1045 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
1048 1046 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
1049 1047 [1]
1050 1048 $ cat m
1051 1049 $Id$
1052 1050 <<<<<<< local
1053 1051 bar
1054 1052 =======
1055 1053 foo
1056 1054 >>>>>>> other
1057 1055
1058 1056 resolve to local
1059 1057
1060 1058 $ HGMERGE=internal:local hg resolve -a
1061 1059 $ hg commit -m localresolve
1062 1060 $ cat m
1063 1061 $Id: m 800511b3a22d Thu, 01 Jan 1970 00:00:00 +0000 test $
1064 1062 bar
1065 1063
1066 1064 Test restricted mode with transplant -b
1067 1065
1068 1066 $ hg update 6
1069 1067 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1070 1068 $ hg branch foo
1071 1069 marked working directory as branch foo
1072 1070 (branches are permanent and global, did you want a bookmark?)
1073 1071 $ mv a a.bak
1074 1072 $ echo foobranch > a
1075 1073 $ cat a.bak >> a
1076 1074 $ rm a.bak
1077 1075 $ hg commit -m 9foobranch
1078 1076 $ hg update default
1079 1077 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1080 1078 $ hg -y transplant -b foo tip
1081 1079 applying 4aa30d025d50
1082 1080 4aa30d025d50 transplanted to e00abbf63521
1083 1081
1084 1082 Expansion in changeset but not in file
1085 1083
1086 1084 $ hg tip -p
1087 1085 changeset: 11:e00abbf63521
1088 1086 tag: tip
1089 1087 parent: 9:800511b3a22d
1090 1088 user: test
1091 1089 date: Thu Jan 01 00:00:00 1970 +0000
1092 1090 summary: 9foobranch
1093 1091
1094 1092 diff -r 800511b3a22d -r e00abbf63521 a
1095 1093 --- a/a Thu Jan 01 00:00:00 1970 +0000
1096 1094 +++ b/a Thu Jan 01 00:00:00 1970 +0000
1097 1095 @@ -1,3 +1,4 @@
1098 1096 +foobranch
1099 1097 expand $Id$
1100 1098 do not process $Id:
1101 1099 xxx $
1102 1100
1103 1101 $ head -n 2 a
1104 1102 foobranch
1105 1103 expand $Id: a e00abbf63521 Thu, 01 Jan 1970 00:00:00 +0000 test $
1106 1104
1107 1105 Turn off expansion
1108 1106
1109 1107 $ hg -q rollback
1110 1108 $ hg -q update -C
1111 1109
1112 1110 kwshrink with unknown file u
1113 1111
1114 1112 $ cp a u
1115 1113 $ hg --verbose kwshrink
1116 1114 overwriting a shrinking keywords
1117 1115 overwriting m shrinking keywords
1118 1116 overwriting x/a shrinking keywords
1119 1117
1120 1118 Keywords shrunk in working directory, but not yet disabled
1121 1119 - cat shows unexpanded keywords
1122 1120 - hg cat shows expanded keywords
1123 1121
1124 1122 $ cat a b
1125 1123 expand $Id$
1126 1124 do not process $Id:
1127 1125 xxx $
1128 1126 $Xinfo$
1129 1127 ignore $Id$
1130 1128 $ hg cat sym a b && echo
1131 1129 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
1132 1130 do not process $Id:
1133 1131 xxx $
1134 1132 $Xinfo: User Name <user@example.com>: firstline $
1135 1133 ignore $Id$
1136 1134 a
1137 1135
1138 1136 Now disable keyword expansion
1139 1137
1140 1138 $ rm "$HGRCPATH"
1141 1139 $ cat a b
1142 1140 expand $Id$
1143 1141 do not process $Id:
1144 1142 xxx $
1145 1143 $Xinfo$
1146 1144 ignore $Id$
1147 1145 $ hg cat sym a b && echo
1148 1146 expand $Id$
1149 1147 do not process $Id:
1150 1148 xxx $
1151 1149 $Xinfo$
1152 1150 ignore $Id$
1153 1151 a
1154 1152
1155 1153 $ cd ..
@@ -1,108 +1,109 b''
1 1 $ "$TESTDIR/hghave" symlink || exit 80
2 2
3 3 $ echo "[extensions]" >> $HGRCPATH
4 4 $ echo "mq=" >> $HGRCPATH
5 5
6 6 $ hg init
7 7 $ hg qinit
8 8 $ hg qnew base.patch
9 9 $ echo aaa > a
10 10 $ echo bbb > b
11 11 $ echo ccc > c
12 12 $ hg add a b c
13 13 $ hg qrefresh
14 14 $ "$TESTDIR/readlink.py" a
15 15 a -> a not a symlink
16 16
17 17
18 18 test replacing a file with a symlink
19 19
20 20 $ hg qnew symlink.patch
21 21 $ rm a
22 22 $ ln -s b a
23 23 $ hg qrefresh --git
24 24 $ "$TESTDIR/readlink.py" a
25 25 a -> b
26 26
27 27 $ hg qpop
28 28 popping symlink.patch
29 29 now at: base.patch
30 30 $ hg qpush
31 31 applying symlink.patch
32 32 now at: symlink.patch
33 33 $ "$TESTDIR/readlink.py" a
34 34 a -> b
35 35
36 36
37 37 test updating a symlink
38 38
39 39 $ rm a
40 40 $ ln -s c a
41 41 $ hg qnew --git -f updatelink
42 42 $ "$TESTDIR/readlink.py" a
43 43 a -> c
44 44 $ hg qpop
45 45 popping updatelink
46 46 now at: symlink.patch
47 47 $ hg qpush --debug
48 invalid branchheads cache (served): tip differs
48 49 applying updatelink
49 50 patching file a
50 51 a
51 52 now at: updatelink
52 53 $ "$TESTDIR/readlink.py" a
53 54 a -> c
54 55 $ hg st
55 56
56 57
57 58 test replacing a symlink with a file
58 59
59 60 $ ln -s c s
60 61 $ hg add s
61 62 $ hg qnew --git -f addlink
62 63 $ rm s
63 64 $ echo sss > s
64 65 $ hg qnew --git -f replacelinkwithfile
65 66 $ hg qpop
66 67 popping replacelinkwithfile
67 68 now at: addlink
68 69 $ hg qpush
69 70 applying replacelinkwithfile
70 71 now at: replacelinkwithfile
71 72 $ cat s
72 73 sss
73 74 $ hg st
74 75
75 76
76 77 test symlink removal
77 78
78 79 $ hg qnew removesl.patch
79 80 $ hg rm a
80 81 $ hg qrefresh --git
81 82 $ hg qpop
82 83 popping removesl.patch
83 84 now at: replacelinkwithfile
84 85 $ hg qpush
85 86 applying removesl.patch
86 87 now at: removesl.patch
87 88 $ hg st -c
88 89 C b
89 90 C c
90 91 C s
91 92
92 93 replace broken symlink with another broken symlink
93 94
94 95 $ ln -s linka linka
95 96 $ hg add linka
96 97 $ hg qnew link
97 98 $ hg mv linka linkb
98 99 $ rm linkb
99 100 $ ln -s linkb linkb
100 101 $ hg qnew movelink
101 102 $ hg qpop
102 103 popping movelink
103 104 now at: link
104 105 $ hg qpush
105 106 applying movelink
106 107 now at: movelink
107 108 $ "$TESTDIR/readlink.py" linkb
108 109 linkb -> linkb
@@ -1,503 +1,513 b''
1 1 $ hglog() { hg log --template "{rev} {phaseidx} {desc}\n" $*; }
2 2 $ mkcommit() {
3 3 > echo "$1" > "$1"
4 4 > hg add "$1"
5 5 > message="$1"
6 6 > shift
7 7 > hg ci -m "$message" $*
8 8 > }
9 9
10 10 $ hg init initialrepo
11 11 $ cd initialrepo
12 12
13 13 Cannot change null revision phase
14 14
15 15 $ hg phase --force --secret null
16 16 abort: cannot change null revision phase
17 17 [255]
18 18 $ hg phase null
19 19 -1: public
20 20
21 21 $ mkcommit A
22 22
23 23 New commit are draft by default
24 24
25 25 $ hglog
26 26 0 1 A
27 27
28 28 Following commit are draft too
29 29
30 30 $ mkcommit B
31 31
32 32 $ hglog
33 33 1 1 B
34 34 0 1 A
35 35
36 36 Draft commit are properly created over public one:
37 37
38 38 $ hg phase --public .
39 39 $ hglog
40 40 1 0 B
41 41 0 0 A
42 42
43 43 $ mkcommit C
44 44 $ mkcommit D
45 45
46 46 $ hglog
47 47 3 1 D
48 48 2 1 C
49 49 1 0 B
50 50 0 0 A
51 51
52 52 Test creating changeset as secret
53 53
54 54 $ mkcommit E --config phases.new-commit='secret'
55 55 $ hglog
56 56 4 2 E
57 57 3 1 D
58 58 2 1 C
59 59 1 0 B
60 60 0 0 A
61 61
62 62 Test the secret property is inherited
63 63
64 64 $ mkcommit H
65 65 $ hglog
66 66 5 2 H
67 67 4 2 E
68 68 3 1 D
69 69 2 1 C
70 70 1 0 B
71 71 0 0 A
72 72
73 73 Even on merge
74 74
75 75 $ hg up -q 1
76 76 $ mkcommit "B'"
77 77 created new head
78 78 $ hglog
79 79 6 1 B'
80 80 5 2 H
81 81 4 2 E
82 82 3 1 D
83 83 2 1 C
84 84 1 0 B
85 85 0 0 A
86 86 $ hg merge 4 # E
87 87 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 88 (branch merge, don't forget to commit)
89 89 $ hg ci -m "merge B' and E"
90 90 $ hglog
91 91 7 2 merge B' and E
92 92 6 1 B'
93 93 5 2 H
94 94 4 2 E
95 95 3 1 D
96 96 2 1 C
97 97 1 0 B
98 98 0 0 A
99 99
100 100 Test secret changesets are not pushed
101 101
102 102 $ hg init ../push-dest
103 103 $ cat > ../push-dest/.hg/hgrc << EOF
104 104 > [phases]
105 105 > publish=False
106 106 > EOF
107 107 $ hg outgoing ../push-dest --template='{rev} {phase} {desc|firstline}\n'
108 108 comparing with ../push-dest
109 109 searching for changes
110 110 0 public A
111 111 1 public B
112 112 2 draft C
113 113 3 draft D
114 114 6 draft B'
115 115 $ hg outgoing -r 'branch(default)' ../push-dest --template='{rev} {phase} {desc|firstline}\n'
116 116 comparing with ../push-dest
117 117 searching for changes
118 118 0 public A
119 119 1 public B
120 120 2 draft C
121 121 3 draft D
122 122 6 draft B'
123 123
124 124 $ hg push ../push-dest -f # force because we push multiple heads
125 125 pushing to ../push-dest
126 126 searching for changes
127 127 adding changesets
128 128 adding manifests
129 129 adding file changes
130 130 added 5 changesets with 5 changes to 5 files (+1 heads)
131 131 $ hglog
132 132 7 2 merge B' and E
133 133 6 1 B'
134 134 5 2 H
135 135 4 2 E
136 136 3 1 D
137 137 2 1 C
138 138 1 0 B
139 139 0 0 A
140 140 $ cd ../push-dest
141 141 $ hglog
142 142 4 1 B'
143 143 3 1 D
144 144 2 1 C
145 145 1 0 B
146 146 0 0 A
147 147
148 148 (Issue3303)
149 149 Check that remote secret changesets are ignored when checking creation of remote heads
150 150
151 151 We add a secret head into the push destination. This secret head shadows a
152 152 visible head shared between the initial repo and the push destination.
153 153
154 154 $ hg up -q 4 # B'
155 155 $ mkcommit Z --config phases.new-commit=secret
156 156 $ hg phase .
157 157 5: secret
158 158
159 159 # We now try to push a new public changeset that descends from the common public
160 160 # head shadowed by the remote secret head.
161 161
162 162 $ cd ../initialrepo
163 163 $ hg up -q 6 #B'
164 164 $ mkcommit I
165 165 created new head
166 166 $ hg push ../push-dest
167 167 pushing to ../push-dest
168 168 searching for changes
169 169 adding changesets
170 170 adding manifests
171 171 adding file changes
172 172 added 1 changesets with 1 changes to 1 files (+1 heads)
173 173
174 174 :note: The "(+1 heads)" is wrong as we did not add any visible head
175 175
176 176 check that branch cache with "unserved" filter are properly computed and stored
177 177
178 178 $ ls ../push-dest/.hg/cache/branchheads*
179 179 ../push-dest/.hg/cache/branchheads-served
180 $ cat ../push-dest/.hg/cache/branchheads-served
181 6d6770faffce199f1fddd1cf87f6f026138cf061 6 465891ffab3c47a3c23792f7dc84156e19a90722
182 b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e default
183 6d6770faffce199f1fddd1cf87f6f026138cf061 default
184 $ hg heads -R ../push-dest --template '{rev}:{node} {phase}\n' #update visible cache too
185 6:6d6770faffce199f1fddd1cf87f6f026138cf061 draft
186 5:2713879da13d6eea1ff22b442a5a87cb31a7ce6a secret
187 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e draft
188 $ ls ../push-dest/.hg/cache/branchheads*
189 ../push-dest/.hg/cache/branchheads-served
180 190 ../push-dest/.hg/cache/branchheads-visible
191 $ cat ../push-dest/.hg/cache/branchheads-served
192 6d6770faffce199f1fddd1cf87f6f026138cf061 6 465891ffab3c47a3c23792f7dc84156e19a90722
193 b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e default
194 6d6770faffce199f1fddd1cf87f6f026138cf061 default
181 195 $ cat ../push-dest/.hg/cache/branchheads-visible
182 196 6d6770faffce199f1fddd1cf87f6f026138cf061 6
183 197 b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e default
184 198 2713879da13d6eea1ff22b442a5a87cb31a7ce6a default
185 199 6d6770faffce199f1fddd1cf87f6f026138cf061 default
186 $ cat ../push-dest/.hg/cache/branchheads-served
187 cf9fe039dfd67e829edf6522a45de057b5c86519 4
188 b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e default
189 cf9fe039dfd67e829edf6522a45de057b5c86519 default
190 200
191 201
192 202 Restore condition prior extra insertion.
193 203 $ hg -q --config extensions.mq= strip .
194 204 $ hg up -q 7
195 205 $ cd ..
196 206
197 207 Test secret changesets are not pulled
198 208
199 209 $ hg init pull-dest
200 210 $ cd pull-dest
201 211 $ hg pull ../initialrepo
202 212 pulling from ../initialrepo
203 213 requesting all changes
204 214 adding changesets
205 215 adding manifests
206 216 adding file changes
207 217 added 5 changesets with 5 changes to 5 files (+1 heads)
208 218 (run 'hg heads' to see heads, 'hg merge' to merge)
209 219 $ hglog
210 220 4 0 B'
211 221 3 0 D
212 222 2 0 C
213 223 1 0 B
214 224 0 0 A
215 225 $ cd ..
216 226
217 227 But secret can still be bundled explicitly
218 228
219 229 $ cd initialrepo
220 230 $ hg bundle --base '4^' -r 'children(4)' ../secret-bundle.hg
221 231 4 changesets found
222 232 $ cd ..
223 233
224 234 Test secret changeset are not cloned
225 235 (during local clone)
226 236
227 237 $ hg clone -qU initialrepo clone-dest
228 238 $ hglog -R clone-dest
229 239 4 0 B'
230 240 3 0 D
231 241 2 0 C
232 242 1 0 B
233 243 0 0 A
234 244
235 245 Test revset
236 246
237 247 $ cd initialrepo
238 248 $ hglog -r 'public()'
239 249 0 0 A
240 250 1 0 B
241 251 $ hglog -r 'draft()'
242 252 2 1 C
243 253 3 1 D
244 254 6 1 B'
245 255 $ hglog -r 'secret()'
246 256 4 2 E
247 257 5 2 H
248 258 7 2 merge B' and E
249 259
250 260 test that phase are displayed in log at debug level
251 261
252 262 $ hg log --debug
253 263 changeset: 7:17a481b3bccb796c0521ae97903d81c52bfee4af
254 264 tag: tip
255 265 phase: secret
256 266 parent: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
257 267 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
258 268 manifest: 7:5e724ffacba267b2ab726c91fc8b650710deaaa8
259 269 user: test
260 270 date: Thu Jan 01 00:00:00 1970 +0000
261 271 files+: C D E
262 272 extra: branch=default
263 273 description:
264 274 merge B' and E
265 275
266 276
267 277 changeset: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
268 278 phase: draft
269 279 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
270 280 parent: -1:0000000000000000000000000000000000000000
271 281 manifest: 6:ab8bfef2392903058bf4ebb9e7746e8d7026b27a
272 282 user: test
273 283 date: Thu Jan 01 00:00:00 1970 +0000
274 284 files+: B'
275 285 extra: branch=default
276 286 description:
277 287 B'
278 288
279 289
280 290 changeset: 5:a030c6be5127abc010fcbff1851536552e6951a8
281 291 phase: secret
282 292 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
283 293 parent: -1:0000000000000000000000000000000000000000
284 294 manifest: 5:5c710aa854874fe3d5fa7192e77bdb314cc08b5a
285 295 user: test
286 296 date: Thu Jan 01 00:00:00 1970 +0000
287 297 files+: H
288 298 extra: branch=default
289 299 description:
290 300 H
291 301
292 302
293 303 changeset: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
294 304 phase: secret
295 305 parent: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
296 306 parent: -1:0000000000000000000000000000000000000000
297 307 manifest: 4:7173fd1c27119750b959e3a0f47ed78abe75d6dc
298 308 user: test
299 309 date: Thu Jan 01 00:00:00 1970 +0000
300 310 files+: E
301 311 extra: branch=default
302 312 description:
303 313 E
304 314
305 315
306 316 changeset: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
307 317 phase: draft
308 318 parent: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
309 319 parent: -1:0000000000000000000000000000000000000000
310 320 manifest: 3:6e1f4c47ecb533ffd0c8e52cdc88afb6cd39e20c
311 321 user: test
312 322 date: Thu Jan 01 00:00:00 1970 +0000
313 323 files+: D
314 324 extra: branch=default
315 325 description:
316 326 D
317 327
318 328
319 329 changeset: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
320 330 phase: draft
321 331 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
322 332 parent: -1:0000000000000000000000000000000000000000
323 333 manifest: 2:66a5a01817fdf5239c273802b5b7618d051c89e4
324 334 user: test
325 335 date: Thu Jan 01 00:00:00 1970 +0000
326 336 files+: C
327 337 extra: branch=default
328 338 description:
329 339 C
330 340
331 341
332 342 changeset: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
333 343 parent: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
334 344 parent: -1:0000000000000000000000000000000000000000
335 345 manifest: 1:cb5cbbc1bfbf24cc34b9e8c16914e9caa2d2a7fd
336 346 user: test
337 347 date: Thu Jan 01 00:00:00 1970 +0000
338 348 files+: B
339 349 extra: branch=default
340 350 description:
341 351 B
342 352
343 353
344 354 changeset: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
345 355 parent: -1:0000000000000000000000000000000000000000
346 356 parent: -1:0000000000000000000000000000000000000000
347 357 manifest: 0:007d8c9d88841325f5c6b06371b35b4e8a2b1a83
348 358 user: test
349 359 date: Thu Jan 01 00:00:00 1970 +0000
350 360 files+: A
351 361 extra: branch=default
352 362 description:
353 363 A
354 364
355 365
356 366
357 367
358 368 (Issue3707)
359 369 test invalid phase name
360 370
361 371 $ mkcommit I --config phases.new-commit='babar'
362 372 transaction abort!
363 373 rollback completed
364 374 abort: phases.new-commit: not a valid phase name ('babar')
365 375 [255]
366 376 Test phase command
367 377 ===================
368 378
369 379 initial picture
370 380
371 381 $ cat >> $HGRCPATH << EOF
372 382 > [extensions]
373 383 > hgext.graphlog=
374 384 > EOF
375 385 $ hg log -G --template "{rev} {phase} {desc}\n"
376 386 @ 7 secret merge B' and E
377 387 |\
378 388 | o 6 draft B'
379 389 | |
380 390 +---o 5 secret H
381 391 | |
382 392 o | 4 secret E
383 393 | |
384 394 o | 3 draft D
385 395 | |
386 396 o | 2 draft C
387 397 |/
388 398 o 1 public B
389 399 |
390 400 o 0 public A
391 401
392 402
393 403 display changesets phase
394 404
395 405 (mixing -r and plain rev specification)
396 406
397 407 $ hg phase 1::4 -r 7
398 408 1: public
399 409 2: draft
400 410 3: draft
401 411 4: secret
402 412 7: secret
403 413
404 414
405 415 move changeset forward
406 416
407 417 (with -r option)
408 418
409 419 $ hg phase --public -r 2
410 420 $ hg log -G --template "{rev} {phase} {desc}\n"
411 421 @ 7 secret merge B' and E
412 422 |\
413 423 | o 6 draft B'
414 424 | |
415 425 +---o 5 secret H
416 426 | |
417 427 o | 4 secret E
418 428 | |
419 429 o | 3 draft D
420 430 | |
421 431 o | 2 public C
422 432 |/
423 433 o 1 public B
424 434 |
425 435 o 0 public A
426 436
427 437
428 438 move changeset backward
429 439
430 440 (without -r option)
431 441
432 442 $ hg phase --draft --force 2
433 443 $ hg log -G --template "{rev} {phase} {desc}\n"
434 444 @ 7 secret merge B' and E
435 445 |\
436 446 | o 6 draft B'
437 447 | |
438 448 +---o 5 secret H
439 449 | |
440 450 o | 4 secret E
441 451 | |
442 452 o | 3 draft D
443 453 | |
444 454 o | 2 draft C
445 455 |/
446 456 o 1 public B
447 457 |
448 458 o 0 public A
449 459
450 460
451 461 move changeset forward and backward
452 462
453 463 $ hg phase --draft --force 1::4
454 464 $ hg log -G --template "{rev} {phase} {desc}\n"
455 465 @ 7 secret merge B' and E
456 466 |\
457 467 | o 6 draft B'
458 468 | |
459 469 +---o 5 secret H
460 470 | |
461 471 o | 4 draft E
462 472 | |
463 473 o | 3 draft D
464 474 | |
465 475 o | 2 draft C
466 476 |/
467 477 o 1 draft B
468 478 |
469 479 o 0 public A
470 480
471 481 test partial failure
472 482
473 483 $ hg phase --public 7
474 484 $ hg phase --draft '5 or 7'
475 485 cannot move 1 changesets to a more permissive phase, use --force
476 486 phase changed for 1 changesets
477 487 [1]
478 488 $ hg log -G --template "{rev} {phase} {desc}\n"
479 489 @ 7 public merge B' and E
480 490 |\
481 491 | o 6 public B'
482 492 | |
483 493 +---o 5 draft H
484 494 | |
485 495 o | 4 public E
486 496 | |
487 497 o | 3 public D
488 498 | |
489 499 o | 2 public C
490 500 |/
491 501 o 1 public B
492 502 |
493 503 o 0 public A
494 504
495 505
496 506 test complete failure
497 507
498 508 $ hg phase --draft 7
499 509 cannot move 1 changesets to a more permissive phase, use --force
500 510 no phases changed
501 511 [1]
502 512
503 513 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now