##// END OF EJS Templates
phases: add rollback support
Pierre-Yves David -
r15455:c6f87bda default
parent child Browse files
Show More
@@ -1,2124 +1,2133
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at path, or initialize it when create=True.

        baseui is copied so per-repo configuration (.hg/hgrc) does not
        leak back into the caller's ui.  Raises error.RepoError when the
        repo is missing (create=False) or already exists (create=True).
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        # True once in-memory phase roots diverge from the on-disk
        # 'phaseroots' file; checked on lock release to write them back
        self._dirtyphases = False

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc yet (e.g. fresh create): keep inherited config
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                        )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                # a missing 'requires' file means an old-style repo;
                # any other I/O error is fatal
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            # shared repos redirect the store via .hg/sharedpath
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
113 113
114 114 def _applyrequirements(self, requirements):
115 115 self.requirements = requirements
116 116 openerreqs = set(('revlogv1', 'generaldelta'))
117 117 self.sopener.options = dict((r, 1) for r in requirements
118 118 if r in openerreqs)
119 119
120 120 def _writerequirements(self):
121 121 reqfile = self.opener("requires", "w")
122 122 for r in self.requirements:
123 123 reqfile.write("%s\n" % r)
124 124 reqfile.close()
125 125
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path is an absolute filesystem path; it is legal only if it lies
        inside a subrepository of the current working copy."""
        if not path.startswith(self.root):
            return False
        # path relative to the repository root
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # path is itself a subrepo root: legal
                    return True
                else:
                    # delegate the remaining path to the subrepository
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter ancestor prefix
                parts.pop()
        return False
162 162
    @filecache('bookmarks')
    def _bookmarks(self):
        # Bookmark map (name -> node); re-read automatically when the
        # .hg/bookmarks file changes (filecache decorator).
        return bookmarks.read(self)
166 166
    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # Name of the active bookmark (or None); tracks bookmarks.current.
        return bookmarks.readcurrent(self)
170 170
    def _writebookmarks(self, marks):
        # Persist bookmarks to disk.  NOTE(review): 'marks' is unused;
        # bookmarks.write reads the state from the repo itself.
        bookmarks.write(self)
173 173
    @filecache('phaseroots')
    def _phaseroots(self):
        # Phase roots read from the store; a fresh read means memory and
        # disk agree, so clear the dirty flag before loading.
        self._dirtyphases = False
        return phases.readroots(self)
178 178
    @propertycache
    def _phaserev(self):
        # Per-revision phase table, indexed by rev.  Default 0 for every
        # revision; each tracked phase overwrites its roots and all of
        # their descendants with that phase's number.
        cache = [0] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache
190 190
    @filecache('00changelog.i', True)
    def changelog(self):
        # Load the changelog.  While a transaction is pending, hooks run
        # with HG_PENDING set to the repo root; in that case also read
        # the not-yet-committed revisions from 00changelog.i.a.
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
199 199
    @filecache('00manifest.i', True)
    def manifest(self):
        # The manifest revlog, cached against 00manifest.i changes.
        return manifest.manifest(self.sopener)
203 203
    @filecache('dirstate')
    def dirstate(self):
        # The working-directory state.  'validate' maps an unknown
        # working parent to nullid instead of crashing, warning once.
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
219 219
220 220 def __getitem__(self, changeid):
221 221 if changeid is None:
222 222 return context.workingctx(self)
223 223 return context.changectx(self, changeid)
224 224
225 225 def __contains__(self, changeid):
226 226 try:
227 227 return bool(self.lookup(changeid))
228 228 except error.RepoLookupError:
229 229 return False
230 230
231 231 def __nonzero__(self):
232 232 return True
233 233
234 234 def __len__(self):
235 235 return len(self.changelog)
236 236
237 237 def __iter__(self):
238 238 for i in xrange(len(self)):
239 239 yield i
240 240
241 241 def revs(self, expr, *args):
242 242 '''Return a list of revisions matching the given revset'''
243 243 expr = revset.formatspec(expr, *args)
244 244 m = revset.match(None, expr)
245 245 return [r for r in m(self, range(len(self)))]
246 246
247 247 def set(self, expr, *args):
248 248 '''
249 249 Yield a context for each matching revision, after doing arg
250 250 replacement via revset.formatspec
251 251 '''
252 252 for r in self.revs(expr, *args):
253 253 yield self[r]
254 254
255 255 def url(self):
256 256 return 'file:' + self.root
257 257
258 258 def hook(self, name, throw=False, **args):
259 259 return hook.hook(self.ui, self, name, throw, **args)
260 260
261 261 tag_disallowed = ':\r\n'
262 262
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tagging: write tag entries and fire tag hooks.

        names may be a single string or a sequence of names.  Local tags
        go to .hg/localtags; global ones are appended to .hgtags and a
        changeset is committed.  Returns the tag changeset node (or None
        for local tags).
        NOTE(review): the mutable default 'extra={}' is only ever read
        here (passed through to commit), so it is safe.
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        # reject names containing any forbidden character (':', CR, LF)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            # pretag hooks may veto the operation (throw=True)
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append entries at EOF, making sure the previous content
            # ends with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    # record the old node first so history shows the move
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        # make sure .hgtags is tracked before committing it
        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
332 332
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag when .hgtags is itself modified/added/
            # removed/deleted/unknown in the working copy: the tag
            # commit would silently pick up those changes
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
362 362
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by nodetags()/tagslist()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
384 384
385 385 def tags(self):
386 386 '''return a mapping of tag to node'''
387 387 return self._tagscache.tags
388 388
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is implicit and always present
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
426 426
427 427 def tagtype(self, tagname):
428 428 '''
429 429 return the type of the given tag. result can be:
430 430
431 431 'local' : a local tag
432 432 'global' : a global tag
433 433 None : tag does not exist
434 434 '''
435 435
436 436 return self._tagscache.tagtypes.get(tagname)
437 437
438 438 def tagslist(self):
439 439 '''return a list of tags ordered by revision'''
440 440 if not self._tagscache.tagslist:
441 441 l = []
442 442 for t, n in self.tags().iteritems():
443 443 r = self.changelog.rev(n)
444 444 l.append((r, t, n))
445 445 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
446 446
447 447 return self._tagscache.tagslist
448 448
449 449 def nodetags(self, node):
450 450 '''return the tags associated with a node'''
451 451 if not self._tagscache.nodetagscache:
452 452 nodetagscache = {}
453 453 for t, n in self.tags().iteritems():
454 454 nodetagscache.setdefault(n, []).append(t)
455 455 for tags in nodetagscache.itervalues():
456 456 tags.sort()
457 457 self._tagscache.nodetagscache = nodetagscache
458 458 return self._tagscache.nodetagscache.get(node, [])
459 459
460 460 def nodebookmarks(self, node):
461 461 marks = []
462 462 for bookmark, n in self._bookmarks.iteritems():
463 463 if n == node:
464 464 marks.append(bookmark)
465 465 return sorted(marks)
466 466
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the branch-head cache 'partial' (valid up to rev lrev)
        # up to the current tip, persisting it if anything was added.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
476 476
    def updatebranchcache(self):
        # Ensure self._branchcache is valid for the current tip.
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already current
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (e.g. after a strip): reload
            # whatever was persisted on disk
            partial, last, lrev = self._readbranchcache()
        else:
            # extend the existing in-memory cache incrementally
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
493 493
494 494 def branchmap(self):
495 495 '''returns a dictionary {branch: [branchheads]}'''
496 496 self.updatebranchcache()
497 497 return self._branchcache
498 498
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                # changelog extra (read()[5]) carries 'close' for closed
                # heads; prefer the newest head that is still open
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
511 511
    def _readbranchcache(self):
        """Read .hg/cache/branchheads; return (partial, last, lrev).

        partial maps branch name -> list of head nodes; last/lrev are
        the tip node/rev the cache was valid for.  Any problem yields an
        empty cache ({}, nullid, nullrev) rather than an error."""
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # missing/unreadable cache: rebuild from scratch
            return {}, nullid, nullrev

        try:
            # first line: "<tiphex> <tiprev>" identifying cache validity
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<headhex> <branchname>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal; fall back to empty
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
540 540
    def _writebranchcache(self, branches, tip, tiprev):
        """Best-effort write of the branch-head cache.

        Format: first line "<tiphex> <tiprev>", then one
        "<headhex> <branchname>" line per head.  I/O errors are ignored
        on purpose: the cache can always be rebuilt."""
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
551 551
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets from ctxgen into the branch-head map
        'partial' (branch name -> list of heads), pruning entries that
        are no longer heads."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # anything reachable from 'latest' is not a head
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
577 577
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: integer rev, '.', 'null', 'tip', exact node /
        hex prefix match, bookmark, tag, branch, partial hex match.
        Raises RepoLookupError when nothing matches."""
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # binary nodes are shown as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
610 610
611 611 def lookupbranch(self, key, remote=None):
612 612 repo = remote or self
613 613 if key in repo.branchmap():
614 614 return key
615 615
616 616 repo = (remote and remote.local()) and remote or self
617 617 return repo[key].branch()
618 618
619 619 def known(self, nodes):
620 620 nm = self.changelog.nodemap
621 621 return [(n in nm) for n in nodes]
622 622
623 623 def local(self):
624 624 return self
625 625
626 626 def join(self, f):
627 627 return os.path.join(self.path, f)
628 628
629 629 def wjoin(self, f):
630 630 return os.path.join(self.root, f)
631 631
632 632 def file(self, f):
633 633 if f[0] == '/':
634 634 f = f[1:]
635 635 return filelog.filelog(self.sopener, f)
636 636
637 637 def changectx(self, changeid):
638 638 return self[changeid]
639 639
640 640 def parents(self, changeid=None):
641 641 '''get list of changectxs for parents of changeid'''
642 642 return self[changeid].parents()
643 643
644 644 def filectx(self, path, changeid=None, fileid=None):
645 645 """changeid can be a changeset revision, node, or tag.
646 646 fileid can be a file revision or node."""
647 647 return context.filectx(self, path, changeid, fileid)
648 648
649 649 def getcwd(self):
650 650 return self.dirstate.getcwd()
651 651
652 652 def pathto(self, f, cwd=None):
653 653 return self.dirstate.pathto(f, cwd)
654 654
655 655 def wfile(self, f, mode='r'):
656 656 return self.wopener(f, mode)
657 657
658 658 def _link(self, f):
659 659 return os.path.islink(self.wjoin(f))
660 660
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) list for a filter
        config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables a pattern inherited from another hgrc
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # registered data filters take precedence over shell
                # commands when the command starts with their name
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
684 684
685 685 def _filter(self, filterpats, filename, data):
686 686 for mf, fn, cmd in filterpats:
687 687 if mf(filename):
688 688 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
689 689 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
690 690 break
691 691
692 692 return data
693 693
    @propertycache
    def _encodefilterpats(self):
        # Filters applied when reading from the working directory.
        return self._loadfilter('encode')
697 697
    @propertycache
    def _decodefilterpats(self):
        # Filters applied when writing to the working directory.
        return self._loadfilter('decode')
701 701
702 702 def adddatafilter(self, name, filter):
703 703 self._datafilters[name] = filter
704 704
705 705 def wread(self, filename):
706 706 if self._link(filename):
707 707 data = os.readlink(self.wjoin(filename))
708 708 else:
709 709 data = self.wopener.read(filename)
710 710 return self._filter(self._encodefilterpats, filename, data)
711 711
712 712 def wwrite(self, filename, data, flags):
713 713 data = self._filter(self._decodefilterpats, filename, data)
714 714 if 'l' in flags:
715 715 self.wopener.symlink(data, filename)
716 716 else:
717 717 self.wopener.write(filename, data)
718 718 if 'x' in flags:
719 719 util.setflags(self.wjoin(filename), False, True)
720 720
721 721 def wwritedata(self, filename, data):
722 722 return self._filter(self._decodefilterpats, filename, data)
723 723
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by desc.

        Journal files are written first so an interrupted transaction
        can be rolled back by 'hg recover'; on success they are renamed
        to their undo.* counterparts for 'hg rollback'."""
        # reuse a live transaction via nesting, if one exists
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weak ref so the transaction dies (and aborts) if abandoned
        self._transref = weakref.ref(tr)
        return tr
743 743
    def _writejournal(self, desc):
        """Snapshot pre-transaction state into journal.* files.

        Saves the dirstate, branch, description, bookmarks and phase
        roots; returns the tuple of journal file paths that are renamed
        to undo.* after a successful transaction.  Note: phaseroots
        lives in the store, so its journal uses sjoin/sopener."""
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet (fresh repo): journal an empty one
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
765 771
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        otherwise."""
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
780 786
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction, if undo information is available.

        Takes both wlock and lock since the working directory may be
        touched.  Returns 0 on success, 1 when there is nothing to
        roll back."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
793 799
    def _rollback(self, dryrun, force):
        """Do the real work of rollback(): restore the store, bookmarks
        and phase roots from their undo.* files, and the dirstate when
        the working-directory parents were stripped.  Caller holds
        wlock and lock."""
        ui = self.ui
        try:
            # undo.desc: "<old repo length>\n<description>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # old-style undo without a description file
            msg = _('rolling back unknown transaction\n')
            desc = None

        # rolling back a commit that is not the working parent would
        # silently lose its changes; require --force for that
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # if the rollback stripped the working-directory parents, the
        # saved dirstate and branch must be restored too
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        return 0
853 862
    def invalidatecaches(self):
        """Drop in-memory caches derived from store data (tags and
        branch heads) so they are recomputed on next access."""
        try:
            delattr(self, '_tagscache')
        except AttributeError:
            # cache was never instantiated
            pass

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
862 871
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        try:
            # drop the filecache property; next access reloads as needed
            delattr(self, 'dirstate')
        except AttributeError:
            pass
876 885
    def invalidate(self):
        """Drop every filecache-managed property (except the dirstate,
        which has its own invalidation) plus derived caches."""
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                # property not instantiated yet; nothing to drop
                pass
        self.invalidatecaches()
888 897
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file 'lockname'.

        With wait=False a held lock raises LockHeld immediately;
        otherwise we warn and retry with a timeout.  releasefn runs on
        release, acquirefn right after acquisition."""
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
903 912
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse an existing live lock if this process already holds it
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush store-level state on release: fncache, dirty phase
            # roots, and refresh filecache entries so stat data is
            # current (dirstate is handled by wlock instead)
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
926 935
927 936 def wlock(self, wait=True):
928 937 '''Lock the non-store parts of the repository (everything under
929 938 .hg except .hg/store) and return a weak reference to the lock.
930 939 Use this before modifying files in .hg.'''
931 940 l = self._wlockref and self._wlockref()
932 941 if l is not None and l.held:
933 942 l.lock()
934 943 return l
935 944
936 945 def unlock():
937 946 self.dirstate.write()
938 947 ce = self._filecache.get('dirstate')
939 948 if ce:
940 949 ce.refresh()
941 950
942 951 l = self._lock(self.join("wlock"), wait, unlock,
943 952 self.invalidatedirstate, _('working directory of %s') %
944 953 self.origroot)
945 954 self._wlockref = weakref.ref(l)
946 955 return l
947 956
948 957 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
949 958 """
950 959 commit an individual file as part of a larger transaction
951 960 """
952 961
953 962 fname = fctx.path()
954 963 text = fctx.data()
955 964 flog = self.file(fname)
956 965 fparent1 = manifest1.get(fname, nullid)
957 966 fparent2 = fparent2o = manifest2.get(fname, nullid)
958 967
959 968 meta = {}
960 969 copy = fctx.renamed()
961 970 if copy and copy[0] != fname:
962 971 # Mark the new revision of this file as a copy of another
963 972 # file. This copy data will effectively act as a parent
964 973 # of this new revision. If this is a merge, the first
965 974 # parent will be the nullid (meaning "look up the copy data")
966 975 # and the second one will be the other parent. For example:
967 976 #
968 977 # 0 --- 1 --- 3 rev1 changes file foo
969 978 # \ / rev2 renames foo to bar and changes it
970 979 # \- 2 -/ rev3 should have bar with all changes and
971 980 # should record that bar descends from
972 981 # bar in rev2 and foo in rev1
973 982 #
974 983 # this allows this merge to succeed:
975 984 #
976 985 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
977 986 # \ / merging rev3 and rev4 should use bar@rev2
978 987 # \- 2 --- 4 as the merge base
979 988 #
980 989
981 990 cfname = copy[0]
982 991 crev = manifest1.get(cfname)
983 992 newfparent = fparent2
984 993
985 994 if manifest2: # branch merge
986 995 if fparent2 == nullid or crev is None: # copied on remote side
987 996 if cfname in manifest2:
988 997 crev = manifest2[cfname]
989 998 newfparent = fparent1
990 999
991 1000 # find source in nearest ancestor if we've lost track
992 1001 if not crev:
993 1002 self.ui.debug(" %s: searching for copy revision for %s\n" %
994 1003 (fname, cfname))
995 1004 for ancestor in self[None].ancestors():
996 1005 if cfname in ancestor:
997 1006 crev = ancestor[cfname].filenode()
998 1007 break
999 1008
1000 1009 if crev:
1001 1010 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1002 1011 meta["copy"] = cfname
1003 1012 meta["copyrev"] = hex(crev)
1004 1013 fparent1, fparent2 = nullid, newfparent
1005 1014 else:
1006 1015 self.ui.warn(_("warning: can't find ancestor for '%s' "
1007 1016 "copied from '%s'!\n") % (fname, cfname))
1008 1017
1009 1018 elif fparent2 != nullid:
1010 1019 # is one parent an ancestor of the other?
1011 1020 fparentancestor = flog.ancestor(fparent1, fparent2)
1012 1021 if fparentancestor == fparent1:
1013 1022 fparent1, fparent2 = fparent2, nullid
1014 1023 elif fparentancestor == fparent2:
1015 1024 fparent2 = nullid
1016 1025
1017 1026 # is the file changed?
1018 1027 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1019 1028 changelist.append(fname)
1020 1029 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1021 1030
1022 1031 # are just the flags changed during merge?
1023 1032 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1024 1033 changelist.append(fname)
1025 1034
1026 1035 return fparent1
1027 1036
1028 1037 def commit(self, text="", user=None, date=None, match=None, force=False,
1029 1038 editor=False, extra={}):
1030 1039 """Add a new revision to current repository.
1031 1040
1032 1041 Revision information is gathered from the working directory,
1033 1042 match can be used to filter the committed files. If editor is
1034 1043 supplied, it is called to get a commit message.
1035 1044 """
1036 1045
1037 1046 def fail(f, msg):
1038 1047 raise util.Abort('%s: %s' % (f, msg))
1039 1048
1040 1049 if not match:
1041 1050 match = matchmod.always(self.root, '')
1042 1051
1043 1052 if not force:
1044 1053 vdirs = []
1045 1054 match.dir = vdirs.append
1046 1055 match.bad = fail
1047 1056
1048 1057 wlock = self.wlock()
1049 1058 try:
1050 1059 wctx = self[None]
1051 1060 merge = len(wctx.parents()) > 1
1052 1061
1053 1062 if (not force and merge and match and
1054 1063 (match.files() or match.anypats())):
1055 1064 raise util.Abort(_('cannot partially commit a merge '
1056 1065 '(do not specify files or patterns)'))
1057 1066
1058 1067 changes = self.status(match=match, clean=force)
1059 1068 if force:
1060 1069 changes[0].extend(changes[6]) # mq may commit unchanged files
1061 1070
1062 1071 # check subrepos
1063 1072 subs = []
1064 1073 removedsubs = set()
1065 1074 if '.hgsub' in wctx:
1066 1075 # only manage subrepos and .hgsubstate if .hgsub is present
1067 1076 for p in wctx.parents():
1068 1077 removedsubs.update(s for s in p.substate if match(s))
1069 1078 for s in wctx.substate:
1070 1079 removedsubs.discard(s)
1071 1080 if match(s) and wctx.sub(s).dirty():
1072 1081 subs.append(s)
1073 1082 if (subs or removedsubs):
1074 1083 if (not match('.hgsub') and
1075 1084 '.hgsub' in (wctx.modified() + wctx.added())):
1076 1085 raise util.Abort(
1077 1086 _("can't commit subrepos without .hgsub"))
1078 1087 if '.hgsubstate' not in changes[0]:
1079 1088 changes[0].insert(0, '.hgsubstate')
1080 1089 if '.hgsubstate' in changes[2]:
1081 1090 changes[2].remove('.hgsubstate')
1082 1091 elif '.hgsub' in changes[2]:
1083 1092 # clean up .hgsubstate when .hgsub is removed
1084 1093 if ('.hgsubstate' in wctx and
1085 1094 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1086 1095 changes[2].insert(0, '.hgsubstate')
1087 1096
1088 1097 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1089 1098 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1090 1099 if changedsubs:
1091 1100 raise util.Abort(_("uncommitted changes in subrepo %s")
1092 1101 % changedsubs[0],
1093 1102 hint=_("use --subrepos for recursive commit"))
1094 1103
1095 1104 # make sure all explicit patterns are matched
1096 1105 if not force and match.files():
1097 1106 matched = set(changes[0] + changes[1] + changes[2])
1098 1107
1099 1108 for f in match.files():
1100 1109 if f == '.' or f in matched or f in wctx.substate:
1101 1110 continue
1102 1111 if f in changes[3]: # missing
1103 1112 fail(f, _('file not found!'))
1104 1113 if f in vdirs: # visited directory
1105 1114 d = f + '/'
1106 1115 for mf in matched:
1107 1116 if mf.startswith(d):
1108 1117 break
1109 1118 else:
1110 1119 fail(f, _("no match under directory!"))
1111 1120 elif f not in self.dirstate:
1112 1121 fail(f, _("file not tracked!"))
1113 1122
1114 1123 if (not force and not extra.get("close") and not merge
1115 1124 and not (changes[0] or changes[1] or changes[2])
1116 1125 and wctx.branch() == wctx.p1().branch()):
1117 1126 return None
1118 1127
1119 1128 ms = mergemod.mergestate(self)
1120 1129 for f in changes[0]:
1121 1130 if f in ms and ms[f] == 'u':
1122 1131 raise util.Abort(_("unresolved merge conflicts "
1123 1132 "(see hg help resolve)"))
1124 1133
1125 1134 cctx = context.workingctx(self, text, user, date, extra, changes)
1126 1135 if editor:
1127 1136 cctx._text = editor(self, cctx, subs)
1128 1137 edited = (text != cctx._text)
1129 1138
1130 1139 # commit subs
1131 1140 if subs or removedsubs:
1132 1141 state = wctx.substate.copy()
1133 1142 for s in sorted(subs):
1134 1143 sub = wctx.sub(s)
1135 1144 self.ui.status(_('committing subrepository %s\n') %
1136 1145 subrepo.subrelpath(sub))
1137 1146 sr = sub.commit(cctx._text, user, date)
1138 1147 state[s] = (state[s][0], sr)
1139 1148 subrepo.writestate(self, state)
1140 1149
1141 1150 # Save commit message in case this transaction gets rolled back
1142 1151 # (e.g. by a pretxncommit hook). Leave the content alone on
1143 1152 # the assumption that the user will use the same editor again.
1144 1153 msgfn = self.savecommitmessage(cctx._text)
1145 1154
1146 1155 p1, p2 = self.dirstate.parents()
1147 1156 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1148 1157 try:
1149 1158 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1150 1159 ret = self.commitctx(cctx, True)
1151 1160 except:
1152 1161 if edited:
1153 1162 self.ui.write(
1154 1163 _('note: commit message saved in %s\n') % msgfn)
1155 1164 raise
1156 1165
1157 1166 # update bookmarks, dirstate and mergestate
1158 1167 bookmarks.update(self, p1, ret)
1159 1168 for f in changes[0] + changes[1]:
1160 1169 self.dirstate.normal(f)
1161 1170 for f in changes[2]:
1162 1171 self.dirstate.drop(f)
1163 1172 self.dirstate.setparents(ret)
1164 1173 ms.reset()
1165 1174 finally:
1166 1175 wlock.release()
1167 1176
1168 1177 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1169 1178 return ret
1170 1179
1171 1180 def commitctx(self, ctx, error=False):
1172 1181 """Add a new revision to current repository.
1173 1182 Revision information is passed via the context argument.
1174 1183 """
1175 1184
1176 1185 tr = lock = None
1177 1186 removed = list(ctx.removed())
1178 1187 p1, p2 = ctx.p1(), ctx.p2()
1179 1188 user = ctx.user()
1180 1189
1181 1190 lock = self.lock()
1182 1191 try:
1183 1192 tr = self.transaction("commit")
1184 1193 trp = weakref.proxy(tr)
1185 1194
1186 1195 if ctx.files():
1187 1196 m1 = p1.manifest().copy()
1188 1197 m2 = p2.manifest()
1189 1198
1190 1199 # check in files
1191 1200 new = {}
1192 1201 changed = []
1193 1202 linkrev = len(self)
1194 1203 for f in sorted(ctx.modified() + ctx.added()):
1195 1204 self.ui.note(f + "\n")
1196 1205 try:
1197 1206 fctx = ctx[f]
1198 1207 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1199 1208 changed)
1200 1209 m1.set(f, fctx.flags())
1201 1210 except OSError, inst:
1202 1211 self.ui.warn(_("trouble committing %s!\n") % f)
1203 1212 raise
1204 1213 except IOError, inst:
1205 1214 errcode = getattr(inst, 'errno', errno.ENOENT)
1206 1215 if error or errcode and errcode != errno.ENOENT:
1207 1216 self.ui.warn(_("trouble committing %s!\n") % f)
1208 1217 raise
1209 1218 else:
1210 1219 removed.append(f)
1211 1220
1212 1221 # update manifest
1213 1222 m1.update(new)
1214 1223 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1215 1224 drop = [f for f in removed if f in m1]
1216 1225 for f in drop:
1217 1226 del m1[f]
1218 1227 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1219 1228 p2.manifestnode(), (new, drop))
1220 1229 files = changed + removed
1221 1230 else:
1222 1231 mn = p1.manifestnode()
1223 1232 files = []
1224 1233
1225 1234 # update changelog
1226 1235 self.changelog.delayupdate()
1227 1236 n = self.changelog.add(mn, files, ctx.description(),
1228 1237 trp, p1.node(), p2.node(),
1229 1238 user, ctx.date(), ctx.extra().copy())
1230 1239 p = lambda: self.changelog.writepending() and self.root or ""
1231 1240 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1232 1241 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1233 1242 parent2=xp2, pending=p)
1234 1243 self.changelog.finalize(trp)
1235 1244 tr.close()
1236 1245
1237 1246 if self._branchcache:
1238 1247 self.updatebranchcache()
1239 1248 return n
1240 1249 finally:
1241 1250 if tr:
1242 1251 tr.release()
1243 1252 lock.release()
1244 1253
1245 1254 def destroyed(self):
1246 1255 '''Inform the repository that nodes have been destroyed.
1247 1256 Intended for use by strip and rollback, so there's a common
1248 1257 place for anything that has to be done after destroying history.'''
1249 1258 # XXX it might be nice if we could take the list of destroyed
1250 1259 # nodes, but I don't see an easy way for rollback() to do that
1251 1260
1252 1261 # Ensure the persistent tag cache is updated. Doing it now
1253 1262 # means that the tag cache only has to worry about destroyed
1254 1263 # heads immediately after a strip/rollback. That in turn
1255 1264 # guarantees that "cachetip == currenttip" (comparing both rev
1256 1265 # and node) always means no nodes have been added or destroyed.
1257 1266
1258 1267 # XXX this is suboptimal when qrefresh'ing: we strip the current
1259 1268 # head, refresh the tag cache, then immediately add a new head.
1260 1269 # But I think doing it this way is necessary for the "instant
1261 1270 # tag cache retrieval" case to work.
1262 1271 self.invalidatecaches()
1263 1272
1264 1273 def walk(self, match, node=None):
1265 1274 '''
1266 1275 walk recursively through the directory tree or a given
1267 1276 changeset, finding all files matched by the match
1268 1277 function
1269 1278 '''
1270 1279 return self[node].walk(match)
1271 1280
1272 1281 def status(self, node1='.', node2=None, match=None,
1273 1282 ignored=False, clean=False, unknown=False,
1274 1283 listsubrepos=False):
1275 1284 """return status of files between two nodes or node and working directory
1276 1285
1277 1286 If node1 is None, use the first dirstate parent instead.
1278 1287 If node2 is None, compare node1 with working directory.
1279 1288 """
1280 1289
1281 1290 def mfmatches(ctx):
1282 1291 mf = ctx.manifest().copy()
1283 1292 for fn in mf.keys():
1284 1293 if not match(fn):
1285 1294 del mf[fn]
1286 1295 return mf
1287 1296
1288 1297 if isinstance(node1, context.changectx):
1289 1298 ctx1 = node1
1290 1299 else:
1291 1300 ctx1 = self[node1]
1292 1301 if isinstance(node2, context.changectx):
1293 1302 ctx2 = node2
1294 1303 else:
1295 1304 ctx2 = self[node2]
1296 1305
1297 1306 working = ctx2.rev() is None
1298 1307 parentworking = working and ctx1 == self['.']
1299 1308 match = match or matchmod.always(self.root, self.getcwd())
1300 1309 listignored, listclean, listunknown = ignored, clean, unknown
1301 1310
1302 1311 # load earliest manifest first for caching reasons
1303 1312 if not working and ctx2.rev() < ctx1.rev():
1304 1313 ctx2.manifest()
1305 1314
1306 1315 if not parentworking:
1307 1316 def bad(f, msg):
1308 1317 if f not in ctx1:
1309 1318 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1310 1319 match.bad = bad
1311 1320
1312 1321 if working: # we need to scan the working dir
1313 1322 subrepos = []
1314 1323 if '.hgsub' in self.dirstate:
1315 1324 subrepos = ctx2.substate.keys()
1316 1325 s = self.dirstate.status(match, subrepos, listignored,
1317 1326 listclean, listunknown)
1318 1327 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1319 1328
1320 1329 # check for any possibly clean files
1321 1330 if parentworking and cmp:
1322 1331 fixup = []
1323 1332 # do a full compare of any files that might have changed
1324 1333 for f in sorted(cmp):
1325 1334 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1326 1335 or ctx1[f].cmp(ctx2[f])):
1327 1336 modified.append(f)
1328 1337 else:
1329 1338 fixup.append(f)
1330 1339
1331 1340 # update dirstate for files that are actually clean
1332 1341 if fixup:
1333 1342 if listclean:
1334 1343 clean += fixup
1335 1344
1336 1345 try:
1337 1346 # updating the dirstate is optional
1338 1347 # so we don't wait on the lock
1339 1348 wlock = self.wlock(False)
1340 1349 try:
1341 1350 for f in fixup:
1342 1351 self.dirstate.normal(f)
1343 1352 finally:
1344 1353 wlock.release()
1345 1354 except error.LockError:
1346 1355 pass
1347 1356
1348 1357 if not parentworking:
1349 1358 mf1 = mfmatches(ctx1)
1350 1359 if working:
1351 1360 # we are comparing working dir against non-parent
1352 1361 # generate a pseudo-manifest for the working dir
1353 1362 mf2 = mfmatches(self['.'])
1354 1363 for f in cmp + modified + added:
1355 1364 mf2[f] = None
1356 1365 mf2.set(f, ctx2.flags(f))
1357 1366 for f in removed:
1358 1367 if f in mf2:
1359 1368 del mf2[f]
1360 1369 else:
1361 1370 # we are comparing two revisions
1362 1371 deleted, unknown, ignored = [], [], []
1363 1372 mf2 = mfmatches(ctx2)
1364 1373
1365 1374 modified, added, clean = [], [], []
1366 1375 for fn in mf2:
1367 1376 if fn in mf1:
1368 1377 if (fn not in deleted and
1369 1378 (mf1.flags(fn) != mf2.flags(fn) or
1370 1379 (mf1[fn] != mf2[fn] and
1371 1380 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1372 1381 modified.append(fn)
1373 1382 elif listclean:
1374 1383 clean.append(fn)
1375 1384 del mf1[fn]
1376 1385 elif fn not in deleted:
1377 1386 added.append(fn)
1378 1387 removed = mf1.keys()
1379 1388
1380 1389 if working and modified and not self.dirstate._checklink:
1381 1390 # Symlink placeholders may get non-symlink-like contents
1382 1391 # via user error or dereferencing by NFS or Samba servers,
1383 1392 # so we filter out any placeholders that don't look like a
1384 1393 # symlink
1385 1394 sane = []
1386 1395 for f in modified:
1387 1396 if ctx2.flags(f) == 'l':
1388 1397 d = ctx2[f].data()
1389 1398 if len(d) >= 1024 or '\n' in d or util.binary(d):
1390 1399 self.ui.debug('ignoring suspect symlink placeholder'
1391 1400 ' "%s"\n' % f)
1392 1401 continue
1393 1402 sane.append(f)
1394 1403 modified = sane
1395 1404
1396 1405 r = modified, added, removed, deleted, unknown, ignored, clean
1397 1406
1398 1407 if listsubrepos:
1399 1408 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1400 1409 if working:
1401 1410 rev2 = None
1402 1411 else:
1403 1412 rev2 = ctx2.substate[subpath][1]
1404 1413 try:
1405 1414 submatch = matchmod.narrowmatcher(subpath, match)
1406 1415 s = sub.status(rev2, match=submatch, ignored=listignored,
1407 1416 clean=listclean, unknown=listunknown,
1408 1417 listsubrepos=True)
1409 1418 for rfiles, sfiles in zip(r, s):
1410 1419 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1411 1420 except error.LookupError:
1412 1421 self.ui.status(_("skipping missing subrepository: %s\n")
1413 1422 % subpath)
1414 1423
1415 1424 for l in r:
1416 1425 l.sort()
1417 1426 return r
1418 1427
1419 1428 def heads(self, start=None):
1420 1429 heads = self.changelog.heads(start)
1421 1430 # sort the output in rev descending order
1422 1431 return sorted(heads, key=self.changelog.rev, reverse=True)
1423 1432
1424 1433 def branchheads(self, branch=None, start=None, closed=False):
1425 1434 '''return a (possibly filtered) list of heads for the given branch
1426 1435
1427 1436 Heads are returned in topological order, from newest to oldest.
1428 1437 If branch is None, use the dirstate branch.
1429 1438 If start is not None, return only heads reachable from start.
1430 1439 If closed is True, return heads that are marked as closed as well.
1431 1440 '''
1432 1441 if branch is None:
1433 1442 branch = self[None].branch()
1434 1443 branches = self.branchmap()
1435 1444 if branch not in branches:
1436 1445 return []
1437 1446 # the cache returns heads ordered lowest to highest
1438 1447 bheads = list(reversed(branches[branch]))
1439 1448 if start is not None:
1440 1449 # filter out the heads that cannot be reached from startrev
1441 1450 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1442 1451 bheads = [h for h in bheads if h in fbheads]
1443 1452 if not closed:
1444 1453 bheads = [h for h in bheads if
1445 1454 ('close' not in self.changelog.read(h)[5])]
1446 1455 return bheads
1447 1456
1448 1457 def branches(self, nodes):
1449 1458 if not nodes:
1450 1459 nodes = [self.changelog.tip()]
1451 1460 b = []
1452 1461 for n in nodes:
1453 1462 t = n
1454 1463 while True:
1455 1464 p = self.changelog.parents(n)
1456 1465 if p[1] != nullid or p[0] == nullid:
1457 1466 b.append((t, n, p[0], p[1]))
1458 1467 break
1459 1468 n = p[0]
1460 1469 return b
1461 1470
1462 1471 def between(self, pairs):
1463 1472 r = []
1464 1473
1465 1474 for top, bottom in pairs:
1466 1475 n, l, i = top, [], 0
1467 1476 f = 1
1468 1477
1469 1478 while n != bottom and n != nullid:
1470 1479 p = self.changelog.parents(n)[0]
1471 1480 if i == f:
1472 1481 l.append(n)
1473 1482 f = f * 2
1474 1483 n = p
1475 1484 i += 1
1476 1485
1477 1486 r.append(l)
1478 1487
1479 1488 return r
1480 1489
1481 1490 def pull(self, remote, heads=None, force=False):
1482 1491 lock = self.lock()
1483 1492 try:
1484 1493 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1485 1494 force=force)
1486 1495 common, fetch, rheads = tmp
1487 1496 if not fetch:
1488 1497 self.ui.status(_("no changes found\n"))
1489 1498 result = 0
1490 1499 else:
1491 1500 if heads is None and list(common) == [nullid]:
1492 1501 self.ui.status(_("requesting all changes\n"))
1493 1502 elif heads is None and remote.capable('changegroupsubset'):
1494 1503 # issue1320, avoid a race if remote changed after discovery
1495 1504 heads = rheads
1496 1505
1497 1506 if remote.capable('getbundle'):
1498 1507 cg = remote.getbundle('pull', common=common,
1499 1508 heads=heads or rheads)
1500 1509 elif heads is None:
1501 1510 cg = remote.changegroup(fetch, 'pull')
1502 1511 elif not remote.capable('changegroupsubset'):
1503 1512 raise util.Abort(_("partial pull cannot be done because "
1504 1513 "other repository doesn't support "
1505 1514 "changegroupsubset."))
1506 1515 else:
1507 1516 cg = remote.changegroupsubset(fetch, heads, 'pull')
1508 1517 result = self.addchangegroup(cg, 'pull', remote.url(),
1509 1518 lock=lock)
1510 1519 finally:
1511 1520 lock.release()
1512 1521
1513 1522 return result
1514 1523
1515 1524 def checkpush(self, force, revs):
1516 1525 """Extensions can override this function if additional checks have
1517 1526 to be performed before pushing, or call it if they override push
1518 1527 command.
1519 1528 """
1520 1529 pass
1521 1530
1522 1531 def push(self, remote, force=False, revs=None, newbranch=False):
1523 1532 '''Push outgoing changesets (limited by revs) from the current
1524 1533 repository to remote. Return an integer:
1525 1534 - 0 means HTTP error *or* nothing to push
1526 1535 - 1 means we pushed and remote head count is unchanged *or*
1527 1536 we have outgoing changesets but refused to push
1528 1537 - other values as described by addchangegroup()
1529 1538 '''
1530 1539 # there are two ways to push to remote repo:
1531 1540 #
1532 1541 # addchangegroup assumes local user can lock remote
1533 1542 # repo (local filesystem, old ssh servers).
1534 1543 #
1535 1544 # unbundle assumes local user cannot lock remote repo (new ssh
1536 1545 # servers, http servers).
1537 1546
1538 1547 self.checkpush(force, revs)
1539 1548 lock = None
1540 1549 unbundle = remote.capable('unbundle')
1541 1550 if not unbundle:
1542 1551 lock = remote.lock()
1543 1552 try:
1544 1553 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1545 1554 newbranch)
1546 1555 ret = remote_heads
1547 1556 if cg is not None:
1548 1557 if unbundle:
1549 1558 # local repo finds heads on server, finds out what
1550 1559 # revs it must push. once revs transferred, if server
1551 1560 # finds it has different heads (someone else won
1552 1561 # commit/push race), server aborts.
1553 1562 if force:
1554 1563 remote_heads = ['force']
1555 1564 # ssh: return remote's addchangegroup()
1556 1565 # http: return remote's addchangegroup() or 0 for error
1557 1566 ret = remote.unbundle(cg, remote_heads, 'push')
1558 1567 else:
1559 1568 # we return an integer indicating remote head count change
1560 1569 ret = remote.addchangegroup(cg, 'push', self.url(),
1561 1570 lock=lock)
1562 1571 finally:
1563 1572 if lock is not None:
1564 1573 lock.release()
1565 1574
1566 1575 self.ui.debug("checking for updated bookmarks\n")
1567 1576 rb = remote.listkeys('bookmarks')
1568 1577 for k in rb.keys():
1569 1578 if k in self._bookmarks:
1570 1579 nr, nl = rb[k], hex(self._bookmarks[k])
1571 1580 if nr in self:
1572 1581 cr = self[nr]
1573 1582 cl = self[nl]
1574 1583 if cl in cr.descendants():
1575 1584 r = remote.pushkey('bookmarks', k, nr, nl)
1576 1585 if r:
1577 1586 self.ui.status(_("updating bookmark %s\n") % k)
1578 1587 else:
1579 1588 self.ui.warn(_('updating bookmark %s'
1580 1589 ' failed!\n') % k)
1581 1590
1582 1591 return ret
1583 1592
1584 1593 def changegroupinfo(self, nodes, source):
1585 1594 if self.ui.verbose or source == 'bundle':
1586 1595 self.ui.status(_("%d changesets found\n") % len(nodes))
1587 1596 if self.ui.debugflag:
1588 1597 self.ui.debug("list of changesets:\n")
1589 1598 for node in nodes:
1590 1599 self.ui.debug("%s\n" % hex(node))
1591 1600
1592 1601 def changegroupsubset(self, bases, heads, source):
1593 1602 """Compute a changegroup consisting of all the nodes that are
1594 1603 descendants of any of the bases and ancestors of any of the heads.
1595 1604 Return a chunkbuffer object whose read() method will return
1596 1605 successive changegroup chunks.
1597 1606
1598 1607 It is fairly complex as determining which filenodes and which
1599 1608 manifest nodes need to be included for the changeset to be complete
1600 1609 is non-trivial.
1601 1610
1602 1611 Another wrinkle is doing the reverse, figuring out which changeset in
1603 1612 the changegroup a particular filenode or manifestnode belongs to.
1604 1613 """
1605 1614 cl = self.changelog
1606 1615 if not bases:
1607 1616 bases = [nullid]
1608 1617 csets, bases, heads = cl.nodesbetween(bases, heads)
1609 1618 # We assume that all ancestors of bases are known
1610 1619 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1611 1620 return self._changegroupsubset(common, csets, heads, source)
1612 1621
1613 1622 def getbundle(self, source, heads=None, common=None):
1614 1623 """Like changegroupsubset, but returns the set difference between the
1615 1624 ancestors of heads and the ancestors common.
1616 1625
1617 1626 If heads is None, use the local heads. If common is None, use [nullid].
1618 1627
1619 1628 The nodes in common might not all be known locally due to the way the
1620 1629 current discovery protocol works.
1621 1630 """
1622 1631 cl = self.changelog
1623 1632 if common:
1624 1633 nm = cl.nodemap
1625 1634 common = [n for n in common if n in nm]
1626 1635 else:
1627 1636 common = [nullid]
1628 1637 if not heads:
1629 1638 heads = cl.heads()
1630 1639 common, missing = cl.findcommonmissing(common, heads)
1631 1640 if not missing:
1632 1641 return None
1633 1642 return self._changegroupsubset(common, missing, heads, source)
1634 1643
1635 1644 def _changegroupsubset(self, commonrevs, csets, heads, source):
1636 1645
1637 1646 cl = self.changelog
1638 1647 mf = self.manifest
1639 1648 mfs = {} # needed manifests
1640 1649 fnodes = {} # needed file nodes
1641 1650 changedfiles = set()
1642 1651 fstate = ['', {}]
1643 1652 count = [0]
1644 1653
1645 1654 # can we go through the fast path ?
1646 1655 heads.sort()
1647 1656 if heads == sorted(self.heads()):
1648 1657 return self._changegroup(csets, source)
1649 1658
1650 1659 # slow path
1651 1660 self.hook('preoutgoing', throw=True, source=source)
1652 1661 self.changegroupinfo(csets, source)
1653 1662
1654 1663 # filter any nodes that claim to be part of the known set
1655 1664 def prune(revlog, missing):
1656 1665 return [n for n in missing
1657 1666 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1658 1667
1659 1668 def lookup(revlog, x):
1660 1669 if revlog == cl:
1661 1670 c = cl.read(x)
1662 1671 changedfiles.update(c[3])
1663 1672 mfs.setdefault(c[0], x)
1664 1673 count[0] += 1
1665 1674 self.ui.progress(_('bundling'), count[0],
1666 1675 unit=_('changesets'), total=len(csets))
1667 1676 return x
1668 1677 elif revlog == mf:
1669 1678 clnode = mfs[x]
1670 1679 mdata = mf.readfast(x)
1671 1680 for f in changedfiles:
1672 1681 if f in mdata:
1673 1682 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1674 1683 count[0] += 1
1675 1684 self.ui.progress(_('bundling'), count[0],
1676 1685 unit=_('manifests'), total=len(mfs))
1677 1686 return mfs[x]
1678 1687 else:
1679 1688 self.ui.progress(
1680 1689 _('bundling'), count[0], item=fstate[0],
1681 1690 unit=_('files'), total=len(changedfiles))
1682 1691 return fstate[1][x]
1683 1692
1684 1693 bundler = changegroup.bundle10(lookup)
1685 1694 reorder = self.ui.config('bundle', 'reorder', 'auto')
1686 1695 if reorder == 'auto':
1687 1696 reorder = None
1688 1697 else:
1689 1698 reorder = util.parsebool(reorder)
1690 1699
1691 1700 def gengroup():
1692 1701 # Create a changenode group generator that will call our functions
1693 1702 # back to lookup the owning changenode and collect information.
1694 1703 for chunk in cl.group(csets, bundler, reorder=reorder):
1695 1704 yield chunk
1696 1705 self.ui.progress(_('bundling'), None)
1697 1706
1698 1707 # Create a generator for the manifestnodes that calls our lookup
1699 1708 # and data collection functions back.
1700 1709 count[0] = 0
1701 1710 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1702 1711 yield chunk
1703 1712 self.ui.progress(_('bundling'), None)
1704 1713
1705 1714 mfs.clear()
1706 1715
1707 1716 # Go through all our files in order sorted by name.
1708 1717 count[0] = 0
1709 1718 for fname in sorted(changedfiles):
1710 1719 filerevlog = self.file(fname)
1711 1720 if not len(filerevlog):
1712 1721 raise util.Abort(_("empty or missing revlog for %s") % fname)
1713 1722 fstate[0] = fname
1714 1723 fstate[1] = fnodes.pop(fname, {})
1715 1724
1716 1725 nodelist = prune(filerevlog, fstate[1])
1717 1726 if nodelist:
1718 1727 count[0] += 1
1719 1728 yield bundler.fileheader(fname)
1720 1729 for chunk in filerevlog.group(nodelist, bundler, reorder):
1721 1730 yield chunk
1722 1731
1723 1732 # Signal that no more groups are left.
1724 1733 yield bundler.close()
1725 1734 self.ui.progress(_('bundling'), None)
1726 1735
1727 1736 if csets:
1728 1737 self.hook('outgoing', node=hex(csets[0]), source=source)
1729 1738
1730 1739 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1731 1740
1732 1741 def changegroup(self, basenodes, source):
1733 1742 # to avoid a race we use changegroupsubset() (issue1320)
1734 1743 return self.changegroupsubset(basenodes, self.heads(), source)
1735 1744
1736 1745 def _changegroup(self, nodes, source):
1737 1746 """Compute the changegroup of all nodes that we have that a recipient
1738 1747 doesn't. Return a chunkbuffer object whose read() method will return
1739 1748 successive changegroup chunks.
1740 1749
1741 1750 This is much easier than the previous function as we can assume that
1742 1751 the recipient has any changenode we aren't sending them.
1743 1752
1744 1753 nodes is the set of nodes to send"""
1745 1754
1746 1755 cl = self.changelog
1747 1756 mf = self.manifest
1748 1757 mfs = {}
1749 1758 changedfiles = set()
1750 1759 fstate = ['']
1751 1760 count = [0]
1752 1761
1753 1762 self.hook('preoutgoing', throw=True, source=source)
1754 1763 self.changegroupinfo(nodes, source)
1755 1764
1756 1765 revset = set([cl.rev(n) for n in nodes])
1757 1766
1758 1767 def gennodelst(log):
1759 1768 return [log.node(r) for r in log if log.linkrev(r) in revset]
1760 1769
1761 1770 def lookup(revlog, x):
1762 1771 if revlog == cl:
1763 1772 c = cl.read(x)
1764 1773 changedfiles.update(c[3])
1765 1774 mfs.setdefault(c[0], x)
1766 1775 count[0] += 1
1767 1776 self.ui.progress(_('bundling'), count[0],
1768 1777 unit=_('changesets'), total=len(nodes))
1769 1778 return x
1770 1779 elif revlog == mf:
1771 1780 count[0] += 1
1772 1781 self.ui.progress(_('bundling'), count[0],
1773 1782 unit=_('manifests'), total=len(mfs))
1774 1783 return cl.node(revlog.linkrev(revlog.rev(x)))
1775 1784 else:
1776 1785 self.ui.progress(
1777 1786 _('bundling'), count[0], item=fstate[0],
1778 1787 total=len(changedfiles), unit=_('files'))
1779 1788 return cl.node(revlog.linkrev(revlog.rev(x)))
1780 1789
1781 1790 bundler = changegroup.bundle10(lookup)
1782 1791 reorder = self.ui.config('bundle', 'reorder', 'auto')
1783 1792 if reorder == 'auto':
1784 1793 reorder = None
1785 1794 else:
1786 1795 reorder = util.parsebool(reorder)
1787 1796
1788 1797 def gengroup():
1789 1798 '''yield a sequence of changegroup chunks (strings)'''
1790 1799 # construct a list of all changed files
1791 1800
1792 1801 for chunk in cl.group(nodes, bundler, reorder=reorder):
1793 1802 yield chunk
1794 1803 self.ui.progress(_('bundling'), None)
1795 1804
1796 1805 count[0] = 0
1797 1806 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1798 1807 yield chunk
1799 1808 self.ui.progress(_('bundling'), None)
1800 1809
1801 1810 count[0] = 0
1802 1811 for fname in sorted(changedfiles):
1803 1812 filerevlog = self.file(fname)
1804 1813 if not len(filerevlog):
1805 1814 raise util.Abort(_("empty or missing revlog for %s") % fname)
1806 1815 fstate[0] = fname
1807 1816 nodelist = gennodelst(filerevlog)
1808 1817 if nodelist:
1809 1818 count[0] += 1
1810 1819 yield bundler.fileheader(fname)
1811 1820 for chunk in filerevlog.group(nodelist, bundler, reorder):
1812 1821 yield chunk
1813 1822 yield bundler.close()
1814 1823 self.ui.progress(_('bundling'), None)
1815 1824
1816 1825 if nodes:
1817 1826 self.hook('outgoing', node=hex(nodes[0]), source=source)
1818 1827
1819 1828 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1820 1829
1821 1830 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1822 1831 """Add the changegroup returned by source.read() to this repo.
1823 1832 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1824 1833 the URL of the repo where this changegroup is coming from.
1825 1834 If lock is not None, the function takes ownership of the lock
1826 1835 and releases it after the changegroup is added.
1827 1836
1828 1837 Return an integer summarizing the change to this repo:
1829 1838 - nothing changed or no source: 0
1830 1839 - more heads than before: 1+added heads (2..n)
1831 1840 - fewer heads than before: -1-removed heads (-2..-n)
1832 1841 - number of heads stays the same: 1
1833 1842 """
1834 1843 def csmap(x):
1835 1844 self.ui.debug("add changeset %s\n" % short(x))
1836 1845 return len(cl)
1837 1846
1838 1847 def revmap(x):
1839 1848 return cl.rev(x)
1840 1849
1841 1850 if not source:
1842 1851 return 0
1843 1852
1844 1853 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1845 1854
1846 1855 changesets = files = revisions = 0
1847 1856 efiles = set()
1848 1857
1849 1858 # write changelog data to temp files so concurrent readers will not see
1850 1859 # inconsistent view
1851 1860 cl = self.changelog
1852 1861 cl.delayupdate()
1853 1862 oldheads = cl.heads()
1854 1863
1855 1864 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1856 1865 try:
1857 1866 trp = weakref.proxy(tr)
1858 1867 # pull off the changeset group
1859 1868 self.ui.status(_("adding changesets\n"))
1860 1869 clstart = len(cl)
1861 1870 class prog(object):
1862 1871 step = _('changesets')
1863 1872 count = 1
1864 1873 ui = self.ui
1865 1874 total = None
1866 1875 def __call__(self):
1867 1876 self.ui.progress(self.step, self.count, unit=_('chunks'),
1868 1877 total=self.total)
1869 1878 self.count += 1
1870 1879 pr = prog()
1871 1880 source.callback = pr
1872 1881
1873 1882 source.changelogheader()
1874 1883 if (cl.addgroup(source, csmap, trp) is None
1875 1884 and not emptyok):
1876 1885 raise util.Abort(_("received changelog group is empty"))
1877 1886 clend = len(cl)
1878 1887 changesets = clend - clstart
1879 1888 for c in xrange(clstart, clend):
1880 1889 efiles.update(self[c].files())
1881 1890 efiles = len(efiles)
1882 1891 self.ui.progress(_('changesets'), None)
1883 1892
1884 1893 # pull off the manifest group
1885 1894 self.ui.status(_("adding manifests\n"))
1886 1895 pr.step = _('manifests')
1887 1896 pr.count = 1
1888 1897 pr.total = changesets # manifests <= changesets
1889 1898 # no need to check for empty manifest group here:
1890 1899 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1891 1900 # no new manifest will be created and the manifest group will
1892 1901 # be empty during the pull
1893 1902 source.manifestheader()
1894 1903 self.manifest.addgroup(source, revmap, trp)
1895 1904 self.ui.progress(_('manifests'), None)
1896 1905
1897 1906 needfiles = {}
1898 1907 if self.ui.configbool('server', 'validate', default=False):
1899 1908 # validate incoming csets have their manifests
1900 1909 for cset in xrange(clstart, clend):
1901 1910 mfest = self.changelog.read(self.changelog.node(cset))[0]
1902 1911 mfest = self.manifest.readdelta(mfest)
1903 1912 # store file nodes we must see
1904 1913 for f, n in mfest.iteritems():
1905 1914 needfiles.setdefault(f, set()).add(n)
1906 1915
1907 1916 # process the files
1908 1917 self.ui.status(_("adding file changes\n"))
1909 1918 pr.step = _('files')
1910 1919 pr.count = 1
1911 1920 pr.total = efiles
1912 1921 source.callback = None
1913 1922
1914 1923 while True:
1915 1924 chunkdata = source.filelogheader()
1916 1925 if not chunkdata:
1917 1926 break
1918 1927 f = chunkdata["filename"]
1919 1928 self.ui.debug("adding %s revisions\n" % f)
1920 1929 pr()
1921 1930 fl = self.file(f)
1922 1931 o = len(fl)
1923 1932 if fl.addgroup(source, revmap, trp) is None:
1924 1933 raise util.Abort(_("received file revlog group is empty"))
1925 1934 revisions += len(fl) - o
1926 1935 files += 1
1927 1936 if f in needfiles:
1928 1937 needs = needfiles[f]
1929 1938 for new in xrange(o, len(fl)):
1930 1939 n = fl.node(new)
1931 1940 if n in needs:
1932 1941 needs.remove(n)
1933 1942 if not needs:
1934 1943 del needfiles[f]
1935 1944 self.ui.progress(_('files'), None)
1936 1945
1937 1946 for f, needs in needfiles.iteritems():
1938 1947 fl = self.file(f)
1939 1948 for n in needs:
1940 1949 try:
1941 1950 fl.rev(n)
1942 1951 except error.LookupError:
1943 1952 raise util.Abort(
1944 1953 _('missing file data for %s:%s - run hg verify') %
1945 1954 (f, hex(n)))
1946 1955
1947 1956 dh = 0
1948 1957 if oldheads:
1949 1958 heads = cl.heads()
1950 1959 dh = len(heads) - len(oldheads)
1951 1960 for h in heads:
1952 1961 if h not in oldheads and 'close' in self[h].extra():
1953 1962 dh -= 1
1954 1963 htext = ""
1955 1964 if dh:
1956 1965 htext = _(" (%+d heads)") % dh
1957 1966
1958 1967 self.ui.status(_("added %d changesets"
1959 1968 " with %d changes to %d files%s\n")
1960 1969 % (changesets, revisions, files, htext))
1961 1970
1962 1971 if changesets > 0:
1963 1972 p = lambda: cl.writepending() and self.root or ""
1964 1973 self.hook('pretxnchangegroup', throw=True,
1965 1974 node=hex(cl.node(clstart)), source=srctype,
1966 1975 url=url, pending=p)
1967 1976
1968 1977 # make changelog see real files again
1969 1978 cl.finalize(trp)
1970 1979
1971 1980 tr.close()
1972 1981 finally:
1973 1982 tr.release()
1974 1983 if lock:
1975 1984 lock.release()
1976 1985
1977 1986 if changesets > 0:
1978 1987 # forcefully update the on-disk branch cache
1979 1988 self.ui.debug("updating the branch cache\n")
1980 1989 self.updatebranchcache()
1981 1990 self.hook("changegroup", node=hex(cl.node(clstart)),
1982 1991 source=srctype, url=url)
1983 1992
1984 1993 for i in xrange(clstart, clend):
1985 1994 self.hook("incoming", node=hex(cl.node(i)),
1986 1995 source=srctype, url=url)
1987 1996
1988 1997 # never return 0 here:
1989 1998 if dh < 0:
1990 1999 return dh - 1
1991 2000 else:
1992 2001 return dh + 1
1993 2002
1994 2003 def stream_in(self, remote, requirements):
1995 2004 lock = self.lock()
1996 2005 try:
1997 2006 fp = remote.stream_out()
1998 2007 l = fp.readline()
1999 2008 try:
2000 2009 resp = int(l)
2001 2010 except ValueError:
2002 2011 raise error.ResponseError(
2003 2012 _('Unexpected response from remote server:'), l)
2004 2013 if resp == 1:
2005 2014 raise util.Abort(_('operation forbidden by server'))
2006 2015 elif resp == 2:
2007 2016 raise util.Abort(_('locking the remote repository failed'))
2008 2017 elif resp != 0:
2009 2018 raise util.Abort(_('the server sent an unknown error code'))
2010 2019 self.ui.status(_('streaming all changes\n'))
2011 2020 l = fp.readline()
2012 2021 try:
2013 2022 total_files, total_bytes = map(int, l.split(' ', 1))
2014 2023 except (ValueError, TypeError):
2015 2024 raise error.ResponseError(
2016 2025 _('Unexpected response from remote server:'), l)
2017 2026 self.ui.status(_('%d files to transfer, %s of data\n') %
2018 2027 (total_files, util.bytecount(total_bytes)))
2019 2028 start = time.time()
2020 2029 for i in xrange(total_files):
2021 2030 # XXX doesn't support '\n' or '\r' in filenames
2022 2031 l = fp.readline()
2023 2032 try:
2024 2033 name, size = l.split('\0', 1)
2025 2034 size = int(size)
2026 2035 except (ValueError, TypeError):
2027 2036 raise error.ResponseError(
2028 2037 _('Unexpected response from remote server:'), l)
2029 2038 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2030 2039 # for backwards compat, name was partially encoded
2031 2040 ofp = self.sopener(store.decodedir(name), 'w')
2032 2041 for chunk in util.filechunkiter(fp, limit=size):
2033 2042 ofp.write(chunk)
2034 2043 ofp.close()
2035 2044 elapsed = time.time() - start
2036 2045 if elapsed <= 0:
2037 2046 elapsed = 0.001
2038 2047 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2039 2048 (util.bytecount(total_bytes), elapsed,
2040 2049 util.bytecount(total_bytes / elapsed)))
2041 2050
2042 2051 # new requirements = old non-format requirements + new format-related
2043 2052 # requirements from the streamed-in repository
2044 2053 requirements.update(set(self.requirements) - self.supportedformats)
2045 2054 self._applyrequirements(requirements)
2046 2055 self._writerequirements()
2047 2056
2048 2057 self.invalidate()
2049 2058 return len(self.heads()) + 1
2050 2059 finally:
2051 2060 lock.release()
2052 2061
2053 2062 def clone(self, remote, heads=[], stream=False):
2054 2063 '''clone remote repository.
2055 2064
2056 2065 keyword arguments:
2057 2066 heads: list of revs to clone (forces use of pull)
2058 2067 stream: use streaming clone if possible'''
2059 2068
2060 2069 # now, all clients that can request uncompressed clones can
2061 2070 # read repo formats supported by all servers that can serve
2062 2071 # them.
2063 2072
2064 2073 # if revlog format changes, client will have to check version
2065 2074 # and format flags on "stream" capability, and use
2066 2075 # uncompressed only if compatible.
2067 2076
2068 2077 if stream and not heads:
2069 2078 # 'stream' means remote revlog format is revlogv1 only
2070 2079 if remote.capable('stream'):
2071 2080 return self.stream_in(remote, set(('revlogv1',)))
2072 2081 # otherwise, 'streamreqs' contains the remote revlog format
2073 2082 streamreqs = remote.capable('streamreqs')
2074 2083 if streamreqs:
2075 2084 streamreqs = set(streamreqs.split(','))
2076 2085 # if we support it, stream in and adjust our requirements
2077 2086 if not streamreqs - self.supportedformats:
2078 2087 return self.stream_in(remote, streamreqs)
2079 2088 return self.pull(remote, heads)
2080 2089
2081 2090 def pushkey(self, namespace, key, old, new):
2082 2091 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2083 2092 old=old, new=new)
2084 2093 ret = pushkey.push(self, namespace, key, old, new)
2085 2094 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2086 2095 ret=ret)
2087 2096 return ret
2088 2097
2089 2098 def listkeys(self, namespace):
2090 2099 self.hook('prelistkeys', throw=True, namespace=namespace)
2091 2100 values = pushkey.list(self, namespace)
2092 2101 self.hook('listkeys', namespace=namespace, values=values)
2093 2102 return values
2094 2103
2095 2104 def debugwireargs(self, one, two, three=None, four=None, five=None):
2096 2105 '''used to test argument passing over the wire'''
2097 2106 return "%s %s %s %s %s" % (one, two, three, four, five)
2098 2107
2099 2108 def savecommitmessage(self, text):
2100 2109 fp = self.opener('last-message.txt', 'wb')
2101 2110 try:
2102 2111 fp.write(text)
2103 2112 finally:
2104 2113 fp.close()
2105 2114 return self.pathto(fp.name[len(self.root)+1:])
2106 2115
2107 2116 # used to avoid circular references so destructors work
2108 2117 def aftertrans(files):
2109 2118 renamefiles = [tuple(t) for t in files]
2110 2119 def a():
2111 2120 for src, dest in renamefiles:
2112 2121 util.rename(src, dest)
2113 2122 return a
2114 2123
2115 2124 def undoname(fn):
2116 2125 base, name = os.path.split(fn)
2117 2126 assert name.startswith('journal')
2118 2127 return os.path.join(base, name.replace('journal', 'undo', 1))
2119 2128
2120 2129 def instance(ui, path, create):
2121 2130 return localrepository(ui, util.urllocalpath(path), create)
2122 2131
2123 2132 def islocal(path):
2124 2133 return True
@@ -1,111 +1,113
1 1 Init repo1:
2 2
3 3 $ hg init repo1
4 4 $ cd repo1
5 5 $ echo "some text" > a
6 6 $ hg add
7 7 adding a
8 8 $ hg ci -m first
9 9 $ cat .hg/store/fncache | sort
10 10 data/a.i
11 11
12 12 Testing a.i/b:
13 13
14 14 $ mkdir a.i
15 15 $ echo "some other text" > a.i/b
16 16 $ hg add
17 17 adding a.i/b (glob)
18 18 $ hg ci -m second
19 19 $ cat .hg/store/fncache | sort
20 20 data/a.i
21 21 data/a.i.hg/b.i
22 22
23 23 Testing a.i.hg/c:
24 24
25 25 $ mkdir a.i.hg
26 26 $ echo "yet another text" > a.i.hg/c
27 27 $ hg add
28 28 adding a.i.hg/c (glob)
29 29 $ hg ci -m third
30 30 $ cat .hg/store/fncache | sort
31 31 data/a.i
32 32 data/a.i.hg.hg/c.i
33 33 data/a.i.hg/b.i
34 34
35 35 Testing verify:
36 36
37 37 $ hg verify
38 38 checking changesets
39 39 checking manifests
40 40 crosschecking files in changesets and manifests
41 41 checking files
42 42 3 files, 3 changesets, 3 total revisions
43 43
44 44 $ rm .hg/store/fncache
45 45
46 46 $ hg verify
47 47 checking changesets
48 48 checking manifests
49 49 crosschecking files in changesets and manifests
50 50 checking files
51 51 data/a.i@0: missing revlog!
52 52 data/a.i.hg/c.i@2: missing revlog!
53 53 data/a.i/b.i@1: missing revlog!
54 54 3 files, 3 changesets, 3 total revisions
55 55 3 integrity errors encountered!
56 56 (first damaged changeset appears to be 0)
57 57 [1]
58 58 $ cd ..
59 59
60 60 Non store repo:
61 61
62 62 $ hg --config format.usestore=False init foo
63 63 $ cd foo
64 64 $ mkdir tst.d
65 65 $ echo foo > tst.d/foo
66 66 $ hg ci -Amfoo
67 67 adding tst.d/foo
68 68 $ find .hg | sort
69 69 .hg
70 70 .hg/00changelog.i
71 71 .hg/00manifest.i
72 72 .hg/data
73 73 .hg/data/tst.d.hg
74 74 .hg/data/tst.d.hg/foo.i
75 75 .hg/dirstate
76 76 .hg/last-message.txt
77 77 .hg/requires
78 78 .hg/undo
79 79 .hg/undo.bookmarks
80 80 .hg/undo.branch
81 81 .hg/undo.desc
82 82 .hg/undo.dirstate
83 .hg/undo.phaseroots
83 84 $ cd ..
84 85
85 86 Non fncache repo:
86 87
87 88 $ hg --config format.usefncache=False init bar
88 89 $ cd bar
89 90 $ mkdir tst.d
90 91 $ echo foo > tst.d/Foo
91 92 $ hg ci -Amfoo
92 93 adding tst.d/Foo
93 94 $ find .hg | sort
94 95 .hg
95 96 .hg/00changelog.i
96 97 .hg/dirstate
97 98 .hg/last-message.txt
98 99 .hg/requires
99 100 .hg/store
100 101 .hg/store/00changelog.i
101 102 .hg/store/00manifest.i
102 103 .hg/store/data
103 104 .hg/store/data/tst.d.hg
104 105 .hg/store/data/tst.d.hg/_foo.i
105 106 .hg/store/undo
107 .hg/store/undo.phaseroots
106 108 .hg/undo.bookmarks
107 109 .hg/undo.branch
108 110 .hg/undo.desc
109 111 .hg/undo.dirstate
110 112 $ cd ..
111 113
@@ -1,334 +1,340
1 1 $ "$TESTDIR/hghave" no-windows || exit 80
2 2
3 3 $ cat > nlinks.py <<EOF
4 4 > import os, sys
5 5 > for f in sorted(sys.stdin.readlines()):
6 6 > f = f[:-1]
7 7 > print os.lstat(f).st_nlink, f
8 8 > EOF
9 9
10 10 $ nlinksdir()
11 11 > {
12 12 > find $1 -type f | python $TESTTMP/nlinks.py
13 13 > }
14 14
15 15 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
16 16
17 17 $ cat > linkcp.py <<EOF
18 18 > from mercurial import util
19 19 > import sys
20 20 > util.copyfiles(sys.argv[1], sys.argv[2], hardlink=True)
21 21 > EOF
22 22
23 23 $ linkcp()
24 24 > {
25 25 > python $TESTTMP/linkcp.py $1 $2
26 26 > }
27 27
28 28 Prepare repo r1:
29 29
30 30 $ hg init r1
31 31 $ cd r1
32 32
33 33 $ echo c1 > f1
34 34 $ hg add f1
35 35 $ hg ci -m0
36 36
37 37 $ mkdir d1
38 38 $ cd d1
39 39 $ echo c2 > f2
40 40 $ hg add f2
41 41 $ hg ci -m1
42 42 $ cd ../..
43 43
44 44 $ nlinksdir r1/.hg/store
45 45 1 r1/.hg/store/00changelog.i
46 46 1 r1/.hg/store/00manifest.i
47 47 1 r1/.hg/store/data/d1/f2.i
48 48 1 r1/.hg/store/data/f1.i
49 49 1 r1/.hg/store/fncache
50 50 1 r1/.hg/store/undo
51 1 r1/.hg/store/undo.phaseroots
51 52
52 53
53 54 Create hardlinked clone r2:
54 55
55 56 $ hg clone -U --debug r1 r2
56 57 linked 7 files
57 58
58 59 Create non-hardlinked clone r3:
59 60
60 61 $ hg clone --pull r1 r3
61 62 requesting all changes
62 63 adding changesets
63 64 adding manifests
64 65 adding file changes
65 66 added 2 changesets with 2 changes to 2 files
66 67 updating to branch default
67 68 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 69
69 70
70 71 Repos r1 and r2 should now contain hardlinked files:
71 72
72 73 $ nlinksdir r1/.hg/store
73 74 2 r1/.hg/store/00changelog.i
74 75 2 r1/.hg/store/00manifest.i
75 76 2 r1/.hg/store/data/d1/f2.i
76 77 2 r1/.hg/store/data/f1.i
77 78 2 r1/.hg/store/fncache
78 79 1 r1/.hg/store/undo
80 1 r1/.hg/store/undo.phaseroots
79 81
80 82 $ nlinksdir r2/.hg/store
81 83 2 r2/.hg/store/00changelog.i
82 84 2 r2/.hg/store/00manifest.i
83 85 2 r2/.hg/store/data/d1/f2.i
84 86 2 r2/.hg/store/data/f1.i
85 87 2 r2/.hg/store/fncache
86 88
87 89 Repo r3 should not be hardlinked:
88 90
89 91 $ nlinksdir r3/.hg/store
90 92 1 r3/.hg/store/00changelog.i
91 93 1 r3/.hg/store/00manifest.i
92 94 1 r3/.hg/store/data/d1/f2.i
93 95 1 r3/.hg/store/data/f1.i
94 96 1 r3/.hg/store/fncache
95 97 1 r3/.hg/store/undo
98 1 r3/.hg/store/undo.phaseroots
96 99
97 100
98 101 Create a non-inlined filelog in r3:
99 102
100 103 $ cd r3/d1
101 104 $ python -c 'for x in range(10000): print x' >> data1
102 105 $ for j in 0 1 2 3 4 5 6 7 8 9; do
103 106 > cat data1 >> f2
104 107 > hg commit -m$j
105 108 > done
106 109 $ cd ../..
107 110
108 111 $ nlinksdir r3/.hg/store
109 112 1 r3/.hg/store/00changelog.i
110 113 1 r3/.hg/store/00manifest.i
111 114 1 r3/.hg/store/data/d1/f2.d
112 115 1 r3/.hg/store/data/d1/f2.i
113 116 1 r3/.hg/store/data/f1.i
114 117 1 r3/.hg/store/fncache
115 118 1 r3/.hg/store/undo
119 1 r3/.hg/store/undo.phaseroots
116 120
117 121 Push to repo r1 should break up most hardlinks in r2:
118 122
119 123 $ hg -R r2 verify
120 124 checking changesets
121 125 checking manifests
122 126 crosschecking files in changesets and manifests
123 127 checking files
124 128 2 files, 2 changesets, 2 total revisions
125 129
126 130 $ cd r3
127 131 $ hg push
128 132 pushing to $TESTTMP/r1
129 133 searching for changes
130 134 adding changesets
131 135 adding manifests
132 136 adding file changes
133 137 added 10 changesets with 10 changes to 1 files
134 138
135 139 $ cd ..
136 140
137 141 $ nlinksdir r2/.hg/store
138 142 1 r2/.hg/store/00changelog.i
139 143 1 r2/.hg/store/00manifest.i
140 144 1 r2/.hg/store/data/d1/f2.i
141 145 2 r2/.hg/store/data/f1.i
142 146 1 r2/.hg/store/fncache
143 147
144 148 $ hg -R r2 verify
145 149 checking changesets
146 150 checking manifests
147 151 crosschecking files in changesets and manifests
148 152 checking files
149 153 2 files, 2 changesets, 2 total revisions
150 154
151 155
152 156 $ cd r1
153 157 $ hg up
154 158 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
155 159
156 160 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
157 161
158 162 $ echo c1c1 >> f1
159 163 $ hg ci -m00
160 164 $ cd ..
161 165
162 166 $ nlinksdir r2/.hg/store
163 167 1 r2/.hg/store/00changelog.i
164 168 1 r2/.hg/store/00manifest.i
165 169 1 r2/.hg/store/data/d1/f2.i
166 170 1 r2/.hg/store/data/f1.i
167 171 1 r2/.hg/store/fncache
168 172
169 173
170 174 $ cd r3
171 175 $ hg tip --template '{rev}:{node|short}\n'
172 176 11:a6451b6bc41f
173 177 $ echo bla > f1
174 178 $ hg ci -m1
175 179 $ cd ..
176 180
177 181 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
178 182
179 183 $ linkcp r3 r4
180 184
181 185 r4 has hardlinks in the working dir (not just inside .hg):
182 186
183 187 $ nlinksdir r4
184 188 2 r4/.hg/00changelog.i
185 189 2 r4/.hg/branch
186 190 2 r4/.hg/cache/branchheads
187 191 2 r4/.hg/cache/tags
188 192 2 r4/.hg/dirstate
189 193 2 r4/.hg/hgrc
190 194 2 r4/.hg/last-message.txt
191 195 2 r4/.hg/requires
192 196 2 r4/.hg/store/00changelog.i
193 197 2 r4/.hg/store/00manifest.i
194 198 2 r4/.hg/store/data/d1/f2.d
195 199 2 r4/.hg/store/data/d1/f2.i
196 200 2 r4/.hg/store/data/f1.i
197 201 2 r4/.hg/store/fncache
198 202 2 r4/.hg/store/undo
203 2 r4/.hg/store/undo.phaseroots
199 204 2 r4/.hg/undo.bookmarks
200 205 2 r4/.hg/undo.branch
201 206 2 r4/.hg/undo.desc
202 207 2 r4/.hg/undo.dirstate
203 208 2 r4/d1/data1
204 209 2 r4/d1/f2
205 210 2 r4/f1
206 211
207 212 Update back to revision 11 in r4 should break hardlink of file f1:
208 213
209 214 $ hg -R r4 up 11
210 215 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
211 216
212 217 $ nlinksdir r4
213 218 2 r4/.hg/00changelog.i
214 219 1 r4/.hg/branch
215 220 2 r4/.hg/cache/branchheads
216 221 2 r4/.hg/cache/tags
217 222 1 r4/.hg/dirstate
218 223 2 r4/.hg/hgrc
219 224 2 r4/.hg/last-message.txt
220 225 2 r4/.hg/requires
221 226 2 r4/.hg/store/00changelog.i
222 227 2 r4/.hg/store/00manifest.i
223 228 2 r4/.hg/store/data/d1/f2.d
224 229 2 r4/.hg/store/data/d1/f2.i
225 230 2 r4/.hg/store/data/f1.i
226 231 2 r4/.hg/store/fncache
227 232 2 r4/.hg/store/undo
233 2 r4/.hg/store/undo.phaseroots
228 234 2 r4/.hg/undo.bookmarks
229 235 2 r4/.hg/undo.branch
230 236 2 r4/.hg/undo.desc
231 237 2 r4/.hg/undo.dirstate
232 238 2 r4/d1/data1
233 239 2 r4/d1/f2
234 240 1 r4/f1
235 241
236 242
237 243 Test hardlinking outside hg:
238 244
239 245 $ mkdir x
240 246 $ echo foo > x/a
241 247
242 248 $ linkcp x y
243 249 $ echo bar >> y/a
244 250
245 251 No diff if hardlink:
246 252
247 253 $ diff x/a y/a
248 254
249 255 Test mq hardlinking:
250 256
251 257 $ echo "[extensions]" >> $HGRCPATH
252 258 $ echo "mq=" >> $HGRCPATH
253 259
254 260 $ hg init a
255 261 $ cd a
256 262
257 263 $ hg qimport -n foo - << EOF
258 264 > # HG changeset patch
259 265 > # Date 1 0
260 266 > diff -r 2588a8b53d66 a
261 267 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
262 268 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
263 269 > @@ -0,0 +1,1 @@
264 270 > +a
265 271 > EOF
266 272 adding foo to series file
267 273
268 274 $ hg qpush
269 275 applying foo
270 276 now at: foo
271 277
272 278 $ cd ..
273 279 $ linkcp a b
274 280 $ cd b
275 281
276 282 $ hg qimport -n bar - << EOF
277 283 > # HG changeset patch
278 284 > # Date 2 0
279 285 > diff -r 2588a8b53d66 a
280 286 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
281 287 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
282 288 > @@ -0,0 +1,1 @@
283 289 > +b
284 290 > EOF
285 291 adding bar to series file
286 292
287 293 $ hg qpush
288 294 applying bar
289 295 now at: bar
290 296
291 297 $ cat .hg/patches/status
292 298 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
293 299 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
294 300
295 301 $ cat .hg/patches/series
296 302 foo
297 303 bar
298 304
299 305 $ cat ../a/.hg/patches/status
300 306 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
301 307
302 308 $ cat ../a/.hg/patches/series
303 309 foo
304 310
305 311 Test tags hardlinking:
306 312
307 313 $ hg qdel -r qbase:qtip
308 314 patch foo finalized without changeset message
309 315 patch bar finalized without changeset message
310 316
311 317 $ hg tag -l lfoo
312 318 $ hg tag foo
313 319
314 320 $ cd ..
315 321 $ linkcp b c
316 322 $ cd c
317 323
318 324 $ hg tag -l -r 0 lbar
319 325 $ hg tag -r 0 bar
320 326
321 327 $ cat .hgtags
322 328 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
323 329 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
324 330
325 331 $ cat .hg/localtags
326 332 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
327 333 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
328 334
329 335 $ cat ../b/.hgtags
330 336 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
331 337
332 338 $ cat ../b/.hg/localtags
333 339 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
334 340
@@ -1,20 +1,20
1 1 Test hangup signal in the middle of transaction
2 2
3 3 $ "$TESTDIR/hghave" serve fifo || exit 80
4 4 $ hg init
5 5 $ mkfifo p
6 6 $ hg serve --stdio < p &
7 7 $ P=$!
8 8 $ (echo lock; echo addchangegroup; sleep 5) > p &
9 9 $ Q=$!
10 10 $ sleep 3
11 11 0
12 12 0
13 13 adding changesets
14 14 $ kill -HUP $P
15 15 $ wait
16 16 transaction abort!
17 17 rollback completed
18 18 killed!
19 19 $ echo .hg/* .hg/store/*
20 .hg/00changelog.i .hg/journal.bookmarks .hg/journal.branch .hg/journal.desc .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a
20 .hg/00changelog.i .hg/journal.bookmarks .hg/journal.branch .hg/journal.desc .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a .hg/store/journal.phaseroots
@@ -1,143 +1,145
1 1 test that new files created in .hg inherit the permissions from .hg/store
2 2
3 3
4 4 $ "$TESTDIR/hghave" unix-permissions || exit 80
5 5
6 6 $ mkdir dir
7 7
8 8 just in case somebody has a strange $TMPDIR
9 9
10 10 $ chmod g-s dir
11 11 $ cd dir
12 12
13 13 $ cat >printmodes.py <<EOF
14 14 > import os, sys
15 15 >
16 16 > allnames = []
17 17 > isdir = {}
18 18 > for root, dirs, files in os.walk(sys.argv[1]):
19 19 > for d in dirs:
20 20 > name = os.path.join(root, d)
21 21 > isdir[name] = 1
22 22 > allnames.append(name)
23 23 > for f in files:
24 24 > name = os.path.join(root, f)
25 25 > allnames.append(name)
26 26 > allnames.sort()
27 27 > for name in allnames:
28 28 > suffix = name in isdir and '/' or ''
29 29 > print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
30 30 > EOF
31 31
32 32 $ cat >mode.py <<EOF
33 33 > import sys
34 34 > import os
35 35 > print '%05o' % os.lstat(sys.argv[1]).st_mode
36 36 > EOF
37 37
38 38 $ umask 077
39 39
40 40 $ hg init repo
41 41 $ cd repo
42 42
43 43 $ chmod 0770 .hg/store
44 44
45 45 before commit
46 46 store can be written by the group, other files cannot
47 47 store is setgid
48 48
49 49 $ python ../printmodes.py .
50 50 00700 ./.hg/
51 51 00600 ./.hg/00changelog.i
52 52 00600 ./.hg/requires
53 53 00770 ./.hg/store/
54 54
55 55 $ mkdir dir
56 56 $ touch foo dir/bar
57 57 $ hg ci -qAm 'add files'
58 58
59 59 after commit
60 60 working dir files can only be written by the owner
61 61 files created in .hg can be written by the group
62 62 (in particular, store/**, dirstate, branch cache file, undo files)
63 63 new directories are setgid
64 64
65 65 $ python ../printmodes.py .
66 66 00700 ./.hg/
67 67 00600 ./.hg/00changelog.i
68 68 00660 ./.hg/dirstate
69 69 00660 ./.hg/last-message.txt
70 70 00600 ./.hg/requires
71 71 00770 ./.hg/store/
72 72 00660 ./.hg/store/00changelog.i
73 73 00660 ./.hg/store/00manifest.i
74 74 00770 ./.hg/store/data/
75 75 00770 ./.hg/store/data/dir/
76 76 00660 ./.hg/store/data/dir/bar.i
77 77 00660 ./.hg/store/data/foo.i
78 78 00660 ./.hg/store/fncache
79 79 00660 ./.hg/store/undo
80 00660 ./.hg/store/undo.phaseroots
80 81 00660 ./.hg/undo.bookmarks
81 82 00660 ./.hg/undo.branch
82 83 00660 ./.hg/undo.desc
83 84 00660 ./.hg/undo.dirstate
84 85 00700 ./dir/
85 86 00600 ./dir/bar
86 87 00600 ./foo
87 88
88 89 $ umask 007
89 90 $ hg init ../push
90 91
91 92 before push
92 93 group can write everything
93 94
94 95 $ python ../printmodes.py ../push
95 96 00770 ../push/.hg/
96 97 00660 ../push/.hg/00changelog.i
97 98 00660 ../push/.hg/requires
98 99 00770 ../push/.hg/store/
99 100
100 101 $ umask 077
101 102 $ hg -q push ../push
102 103
103 104 after push
104 105 group can still write everything
105 106
106 107 $ python ../printmodes.py ../push
107 108 00770 ../push/.hg/
108 109 00660 ../push/.hg/00changelog.i
109 110 00770 ../push/.hg/cache/
110 111 00660 ../push/.hg/cache/branchheads
111 112 00660 ../push/.hg/requires
112 113 00770 ../push/.hg/store/
113 114 00660 ../push/.hg/store/00changelog.i
114 115 00660 ../push/.hg/store/00manifest.i
115 116 00770 ../push/.hg/store/data/
116 117 00770 ../push/.hg/store/data/dir/
117 118 00660 ../push/.hg/store/data/dir/bar.i
118 119 00660 ../push/.hg/store/data/foo.i
119 120 00660 ../push/.hg/store/fncache
120 121 00660 ../push/.hg/store/undo
122 00660 ../push/.hg/store/undo.phaseroots
121 123 00660 ../push/.hg/undo.bookmarks
122 124 00660 ../push/.hg/undo.branch
123 125 00660 ../push/.hg/undo.desc
124 126 00660 ../push/.hg/undo.dirstate
125 127
126 128
127 129 Test that we don't lose the setgid bit when we call chmod.
128 130 Not all systems support setgid directories (e.g. HFS+), so
129 131 just check that directories have the same mode.
130 132
131 133 $ cd ..
132 134 $ hg init setgid
133 135 $ cd setgid
134 136 $ chmod g+rwx .hg/store
135 137 $ chmod g+s .hg/store 2> /dev/null
136 138 $ mkdir dir
137 139 $ touch dir/file
138 140 $ hg ci -qAm 'add dir/file'
139 141 $ storemode=`python ../mode.py .hg/store`
140 142 $ dirmode=`python ../mode.py .hg/store/data/dir`
141 143 $ if [ "$storemode" != "$dirmode" ]; then
142 144 > echo "$storemode != $dirmode"
143 145 $ fi
General Comments 0
You need to be logged in to leave comments. Login now