phases: set new commit in 1-phase
Pierre-Yves David
r15483:9ae766f2 default
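This changeset makes commitctx() place each newly committed changeset in phase 1 (draft) rather than leaving it in phase 0 (public): the two lines added near the end of commitctx() hand the new node to phases.retractboundary() after the changelog is finalized and before the transaction closes. A minimal sketch of the semantics, assuming the mercurial.phases module of this series, where retractboundary(repo, targetphase, nodes) moves the phase boundary so every listed node ends up in at least targetphase:

    # Sketch only (not part of the diff): mirrors the call added below.
    from mercurial import phases

    def markdraft(repo, node):
        # phase 0 is public; this series numbers draft as phase 1
        phases.retractboundary(repo, 1, [node])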
@@ -1,2135 +1,2137 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39 self._dirtyphases = False
40 40
41 41 try:
42 42 self.ui.readconfig(self.join("hgrc"), self.root)
43 43 extensions.loadall(self.ui)
44 44 except IOError:
45 45 pass
46 46
47 47 if not os.path.isdir(self.path):
48 48 if create:
49 49 if not os.path.exists(path):
50 50 util.makedirs(path)
51 51 util.makedir(self.path, notindexed=True)
52 52 requirements = ["revlogv1"]
53 53 if self.ui.configbool('format', 'usestore', True):
54 54 os.mkdir(os.path.join(self.path, "store"))
55 55 requirements.append("store")
56 56 if self.ui.configbool('format', 'usefncache', True):
57 57 requirements.append("fncache")
58 58 if self.ui.configbool('format', 'dotencode', True):
59 59 requirements.append('dotencode')
60 60 # create an invalid changelog
61 61 self.opener.append(
62 62 "00changelog.i",
63 63 '\0\0\0\2' # represents revlogv2
64 64 ' dummy changelog to prevent using the old repo layout'
65 65 )
66 66 if self.ui.configbool('format', 'generaldelta', False):
67 67 requirements.append("generaldelta")
68 68 requirements = set(requirements)
69 69 else:
70 70 raise error.RepoError(_("repository %s not found") % path)
71 71 elif create:
72 72 raise error.RepoError(_("repository %s already exists") % path)
73 73 else:
74 74 try:
75 75 requirements = scmutil.readrequires(self.opener, self.supported)
76 76 except IOError, inst:
77 77 if inst.errno != errno.ENOENT:
78 78 raise
79 79 requirements = set()
80 80
81 81 self.sharedpath = self.path
82 82 try:
83 83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 84 if not os.path.exists(s):
85 85 raise error.RepoError(
86 86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 87 self.sharedpath = s
88 88 except IOError, inst:
89 89 if inst.errno != errno.ENOENT:
90 90 raise
91 91
92 92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 93 self.spath = self.store.path
94 94 self.sopener = self.store.opener
95 95 self.sjoin = self.store.join
96 96 self.opener.createmode = self.store.createmode
97 97 self._applyrequirements(requirements)
98 98 if create:
99 99 self._writerequirements()
100 100
101 101
102 102 self._branchcache = None
103 103 self._branchcachetip = None
104 104 self.filterpats = {}
105 105 self._datafilters = {}
106 106 self._transref = self._lockref = self._wlockref = None
107 107
108 108 # A cache for various files under .hg/ that tracks file changes,
109 109 # (used by the filecache decorator)
110 110 #
111 111 # Maps a property name to its util.filecacheentry
112 112 self._filecache = {}
113 113
114 114 def _applyrequirements(self, requirements):
115 115 self.requirements = requirements
116 116 openerreqs = set(('revlogv1', 'generaldelta'))
117 117 self.sopener.options = dict((r, 1) for r in requirements
118 118 if r in openerreqs)
119 119
120 120 def _writerequirements(self):
121 121 reqfile = self.opener("requires", "w")
122 122 for r in self.requirements:
123 123 reqfile.write("%s\n" % r)
124 124 reqfile.close()
125 125
126 126 def _checknested(self, path):
127 127 """Determine if path is a legal nested repository."""
128 128 if not path.startswith(self.root):
129 129 return False
130 130 subpath = path[len(self.root) + 1:]
131 131
132 132 # XXX: Checking against the current working copy is wrong in
133 133 # the sense that it can reject things like
134 134 #
135 135 # $ hg cat -r 10 sub/x.txt
136 136 #
137 137 # if sub/ is no longer a subrepository in the working copy
138 138 # parent revision.
139 139 #
140 140 # However, it can of course also allow things that would have
141 141 # been rejected before, such as the above cat command if sub/
142 142 # is a subrepository now, but was a normal directory before.
143 143 # The old path auditor would have rejected by mistake since it
144 144 # panics when it sees sub/.hg/.
145 145 #
146 146 # All in all, checking against the working copy seems sensible
147 147 # since we want to prevent access to nested repositories on
148 148 # the filesystem *now*.
149 149 ctx = self[None]
150 150 parts = util.splitpath(subpath)
151 151 while parts:
152 152 prefix = os.sep.join(parts)
153 153 if prefix in ctx.substate:
154 154 if prefix == subpath:
155 155 return True
156 156 else:
157 157 sub = ctx.sub(prefix)
158 158 return sub.checknested(subpath[len(prefix) + 1:])
159 159 else:
160 160 parts.pop()
161 161 return False
162 162
163 163 @filecache('bookmarks')
164 164 def _bookmarks(self):
165 165 return bookmarks.read(self)
166 166
167 167 @filecache('bookmarks.current')
168 168 def _bookmarkcurrent(self):
169 169 return bookmarks.readcurrent(self)
170 170
171 171 def _writebookmarks(self, marks):
172 172 bookmarks.write(self)
173 173
174 174 @filecache('phaseroots')
175 175 def _phaseroots(self):
176 176 self._dirtyphases = False
177 177 phaseroots = phases.readroots(self)
178 178 phases.filterunknown(self, phaseroots)
179 179 return phaseroots
180 180
181 181 @propertycache
182 182 def _phaserev(self):
183 183 cache = [0] * len(self)
184 184 for phase in phases.trackedphases:
185 185 roots = map(self.changelog.rev, self._phaseroots[phase])
186 186 if roots:
187 187 for rev in roots:
188 188 cache[rev] = phase
189 189 for rev in self.changelog.descendants(*roots):
190 190 cache[rev] = phase
191 191 return cache
192 192
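A toy model of the cache built in _phaserev above, hedged and self-contained: the DAG is a plain dict rather than the changelog, and all names here are hypothetical. Every revision defaults to phase 0 (public); each tracked phase root and its descendants take the root's phase, with higher phases applied last:

    def buildphasecache(nrevs, children, phaseroots):
        # children: rev -> list of child revs; phaseroots: phase -> roots
        cache = [0] * nrevs
        for phase, roots in sorted(phaseroots.items()):
            stack = list(roots)
            while stack:
                rev = stack.pop()
                cache[rev] = phase
                stack.extend(children.get(rev, []))
        return cache

    # rev 2 is a draft (phase 1) root, so revs 2 and 3 become draft:
    print buildphasecache(4, {0: [1], 1: [2], 2: [3]}, {1: [2]})  # [0, 0, 1, 1]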
193 193 @filecache('00changelog.i', True)
194 194 def changelog(self):
195 195 c = changelog.changelog(self.sopener)
196 196 if 'HG_PENDING' in os.environ:
197 197 p = os.environ['HG_PENDING']
198 198 if p.startswith(self.root):
199 199 c.readpending('00changelog.i.a')
200 200 return c
201 201
202 202 @filecache('00manifest.i', True)
203 203 def manifest(self):
204 204 return manifest.manifest(self.sopener)
205 205
206 206 @filecache('dirstate')
207 207 def dirstate(self):
208 208 warned = [0]
209 209 def validate(node):
210 210 try:
211 211 self.changelog.rev(node)
212 212 return node
213 213 except error.LookupError:
214 214 if not warned[0]:
215 215 warned[0] = True
216 216 self.ui.warn(_("warning: ignoring unknown"
217 217 " working parent %s!\n") % short(node))
218 218 return nullid
219 219
220 220 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
221 221
222 222 def __getitem__(self, changeid):
223 223 if changeid is None:
224 224 return context.workingctx(self)
225 225 return context.changectx(self, changeid)
226 226
227 227 def __contains__(self, changeid):
228 228 try:
229 229 return bool(self.lookup(changeid))
230 230 except error.RepoLookupError:
231 231 return False
232 232
233 233 def __nonzero__(self):
234 234 return True
235 235
236 236 def __len__(self):
237 237 return len(self.changelog)
238 238
239 239 def __iter__(self):
240 240 for i in xrange(len(self)):
241 241 yield i
242 242
243 243 def revs(self, expr, *args):
244 244 '''Return a list of revisions matching the given revset'''
245 245 expr = revset.formatspec(expr, *args)
246 246 m = revset.match(None, expr)
247 247 return [r for r in m(self, range(len(self)))]
248 248
249 249 def set(self, expr, *args):
250 250 '''
251 251 Yield a context for each matching revision, after doing arg
252 252 replacement via revset.formatspec
253 253 '''
254 254 for r in self.revs(expr, *args):
255 255 yield self[r]
256 256
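A usage sketch for revs() and set() above, assuming `repo` is an open localrepository and using formatspec's %s escape for the argument:

    # set() yields a changectx per matching revision, in ascending order.
    for ctx in repo.set('branch(%s) and not merge()', 'default'):
        print ctx.rev(), ctx.branch()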
257 257 def url(self):
258 258 return 'file:' + self.root
259 259
260 260 def hook(self, name, throw=False, **args):
261 261 return hook.hook(self.ui, self, name, throw, **args)
262 262
263 263 tag_disallowed = ':\r\n'
264 264
265 265 def _tag(self, names, node, message, local, user, date, extra={}):
266 266 if isinstance(names, str):
267 267 allchars = names
268 268 names = (names,)
269 269 else:
270 270 allchars = ''.join(names)
271 271 for c in self.tag_disallowed:
272 272 if c in allchars:
273 273 raise util.Abort(_('%r cannot be used in a tag name') % c)
274 274
275 275 branches = self.branchmap()
276 276 for name in names:
277 277 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 278 local=local)
279 279 if name in branches:
280 280 self.ui.warn(_("warning: tag %s conflicts with existing"
281 281 " branch name\n") % name)
282 282
283 283 def writetags(fp, names, munge, prevtags):
284 284 fp.seek(0, 2)
285 285 if prevtags and prevtags[-1] != '\n':
286 286 fp.write('\n')
287 287 for name in names:
288 288 m = munge and munge(name) or name
289 289 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 290 old = self.tags().get(name, nullid)
291 291 fp.write('%s %s\n' % (hex(old), m))
292 292 fp.write('%s %s\n' % (hex(node), m))
293 293 fp.close()
294 294
295 295 prevtags = ''
296 296 if local:
297 297 try:
298 298 fp = self.opener('localtags', 'r+')
299 299 except IOError:
300 300 fp = self.opener('localtags', 'a')
301 301 else:
302 302 prevtags = fp.read()
303 303
304 304 # local tags are stored in the current charset
305 305 writetags(fp, names, None, prevtags)
306 306 for name in names:
307 307 self.hook('tag', node=hex(node), tag=name, local=local)
308 308 return
309 309
310 310 try:
311 311 fp = self.wfile('.hgtags', 'rb+')
312 312 except IOError, e:
313 313 if e.errno != errno.ENOENT:
314 314 raise
315 315 fp = self.wfile('.hgtags', 'ab')
316 316 else:
317 317 prevtags = fp.read()
318 318
319 319 # committed tags are stored in UTF-8
320 320 writetags(fp, names, encoding.fromlocal, prevtags)
321 321
322 322 fp.close()
323 323
324 324 if '.hgtags' not in self.dirstate:
325 325 self[None].add(['.hgtags'])
326 326
327 327 m = matchmod.exact(self.root, '', ['.hgtags'])
328 328 tagnode = self.commit(message, user, date, extra=extra, match=m)
329 329
330 330 for name in names:
331 331 self.hook('tag', node=hex(node), tag=name, local=local)
332 332
333 333 return tagnode
334 334
335 335 def tag(self, names, node, message, local, user, date):
336 336 '''tag a revision with one or more symbolic names.
337 337
338 338 names is a list of strings or, when adding a single tag, names may be a
339 339 string.
340 340
341 341 if local is True, the tags are stored in a per-repository file.
342 342 otherwise, they are stored in the .hgtags file, and a new
343 343 changeset is committed with the change.
344 344
345 345 keyword arguments:
346 346
347 347 local: whether to store tags in non-version-controlled file
348 348 (default False)
349 349
350 350 message: commit message to use if committing
351 351
352 352 user: name of user to use if committing
353 353
354 354 date: date tuple to use if committing'''
355 355
356 356 if not local:
357 357 for x in self.status()[:5]:
358 358 if '.hgtags' in x:
359 359 raise util.Abort(_('working copy of .hgtags is changed '
360 360 '(please commit .hgtags manually)'))
361 361
362 362 self.tags() # instantiate the cache
363 363 self._tag(names, node, message, local, user, date)
364 364
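A hedged usage sketch for tag(): `node` is a binary changelog node and all other values are hypothetical; local=False means the .hgtags change is committed:

    repo.tag(['v1.0'], node, 'Added tag v1.0', False,
             'alice <alice@example.org>', None)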
365 365 @propertycache
366 366 def _tagscache(self):
367 367 '''Returns a tagscache object that contains various tags-related caches.'''
368 368
369 369 # This simplifies its cache management by having one decorated
370 370 # function (this one) and the rest simply fetch things from it.
371 371 class tagscache(object):
372 372 def __init__(self):
373 373 # These two define the set of tags for this repository. tags
374 374 # maps tag name to node; tagtypes maps tag name to 'global' or
375 375 # 'local'. (Global tags are defined by .hgtags across all
376 376 # heads, and local tags are defined in .hg/localtags.)
377 377 # They constitute the in-memory cache of tags.
378 378 self.tags = self.tagtypes = None
379 379
380 380 self.nodetagscache = self.tagslist = None
381 381
382 382 cache = tagscache()
383 383 cache.tags, cache.tagtypes = self._findtags()
384 384
385 385 return cache
386 386
387 387 def tags(self):
388 388 '''return a mapping of tag to node'''
389 389 return self._tagscache.tags
390 390
391 391 def _findtags(self):
392 392 '''Do the hard work of finding tags. Return a pair of dicts
393 393 (tags, tagtypes) where tags maps tag name to node, and tagtypes
394 394 maps tag name to a string like \'global\' or \'local\'.
395 395 Subclasses or extensions are free to add their own tags, but
396 396 should be aware that the returned dicts will be retained for the
397 397 duration of the localrepo object.'''
398 398
399 399 # XXX what tagtype should subclasses/extensions use? Currently
400 400 # mq and bookmarks add tags, but do not set the tagtype at all.
401 401 # Should each extension invent its own tag type? Should there
402 402 # be one tagtype for all such "virtual" tags? Or is the status
403 403 # quo fine?
404 404
405 405 alltags = {} # map tag name to (node, hist)
406 406 tagtypes = {}
407 407
408 408 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
409 409 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
410 410
411 411 # Build the return dicts. Have to re-encode tag names because
412 412 # the tags module always uses UTF-8 (in order not to lose info
413 413 # writing to the cache), but the rest of Mercurial wants them in
414 414 # local encoding.
415 415 tags = {}
416 416 for (name, (node, hist)) in alltags.iteritems():
417 417 if node != nullid:
418 418 try:
419 419 # ignore tags to unknown nodes
420 420 self.changelog.lookup(node)
421 421 tags[encoding.tolocal(name)] = node
422 422 except error.LookupError:
423 423 pass
424 424 tags['tip'] = self.changelog.tip()
425 425 tagtypes = dict([(encoding.tolocal(name), value)
426 426 for (name, value) in tagtypes.iteritems()])
427 427 return (tags, tagtypes)
428 428
429 429 def tagtype(self, tagname):
430 430 '''
431 431 return the type of the given tag. result can be:
432 432
433 433 'local' : a local tag
434 434 'global' : a global tag
435 435 None : tag does not exist
436 436 '''
437 437
438 438 return self._tagscache.tagtypes.get(tagname)
439 439
440 440 def tagslist(self):
441 441 '''return a list of tags ordered by revision'''
442 442 if not self._tagscache.tagslist:
443 443 l = []
444 444 for t, n in self.tags().iteritems():
445 445 r = self.changelog.rev(n)
446 446 l.append((r, t, n))
447 447 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448 448
449 449 return self._tagscache.tagslist
450 450
451 451 def nodetags(self, node):
452 452 '''return the tags associated with a node'''
453 453 if not self._tagscache.nodetagscache:
454 454 nodetagscache = {}
455 455 for t, n in self.tags().iteritems():
456 456 nodetagscache.setdefault(n, []).append(t)
457 457 for tags in nodetagscache.itervalues():
458 458 tags.sort()
459 459 self._tagscache.nodetagscache = nodetagscache
460 460 return self._tagscache.nodetagscache.get(node, [])
461 461
462 462 def nodebookmarks(self, node):
463 463 marks = []
464 464 for bookmark, n in self._bookmarks.iteritems():
465 465 if n == node:
466 466 marks.append(bookmark)
467 467 return sorted(marks)
468 468
469 469 def _branchtags(self, partial, lrev):
470 470 # TODO: rename this function?
471 471 tiprev = len(self) - 1
472 472 if lrev != tiprev:
473 473 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 474 self._updatebranchcache(partial, ctxgen)
475 475 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476 476
477 477 return partial
478 478
479 479 def updatebranchcache(self):
480 480 tip = self.changelog.tip()
481 481 if self._branchcache is not None and self._branchcachetip == tip:
482 482 return self._branchcache
483 483
484 484 oldtip = self._branchcachetip
485 485 self._branchcachetip = tip
486 486 if oldtip is None or oldtip not in self.changelog.nodemap:
487 487 partial, last, lrev = self._readbranchcache()
488 488 else:
489 489 lrev = self.changelog.rev(oldtip)
490 490 partial = self._branchcache
491 491
492 492 self._branchtags(partial, lrev)
493 493 # this private cache holds all heads (not just tips)
494 494 self._branchcache = partial
495 495
496 496 def branchmap(self):
497 497 '''returns a dictionary {branch: [branchheads]}'''
498 498 self.updatebranchcache()
499 499 return self._branchcache
500 500
501 501 def branchtags(self):
502 502 '''return a dict where branch names map to the tipmost head of
503 503 the branch, open heads come before closed'''
504 504 bt = {}
505 505 for bn, heads in self.branchmap().iteritems():
506 506 tip = heads[-1]
507 507 for h in reversed(heads):
508 508 if 'close' not in self.changelog.read(h)[5]:
509 509 tip = h
510 510 break
511 511 bt[bn] = tip
512 512 return bt
513 513
514 514 def _readbranchcache(self):
515 515 partial = {}
516 516 try:
517 517 f = self.opener("cache/branchheads")
518 518 lines = f.read().split('\n')
519 519 f.close()
520 520 except (IOError, OSError):
521 521 return {}, nullid, nullrev
522 522
523 523 try:
524 524 last, lrev = lines.pop(0).split(" ", 1)
525 525 last, lrev = bin(last), int(lrev)
526 526 if lrev >= len(self) or self[lrev].node() != last:
527 527 # invalidate the cache
528 528 raise ValueError('invalidating branch cache (tip differs)')
529 529 for l in lines:
530 530 if not l:
531 531 continue
532 532 node, label = l.split(" ", 1)
533 533 label = encoding.tolocal(label.strip())
534 534 partial.setdefault(label, []).append(bin(node))
535 535 except KeyboardInterrupt:
536 536 raise
537 537 except Exception, inst:
538 538 if self.ui.debugflag:
539 539 self.ui.warn(str(inst), '\n')
540 540 partial, last, lrev = {}, nullid, nullrev
541 541 return partial, last, lrev
542 542
543 543 def _writebranchcache(self, branches, tip, tiprev):
544 544 try:
545 545 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 546 f.write("%s %s\n" % (hex(tip), tiprev))
547 547 for label, nodes in branches.iteritems():
548 548 for node in nodes:
549 549 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 550 f.close()
551 551 except (IOError, OSError):
552 552 pass
553 553
554 554 def _updatebranchcache(self, partial, ctxgen):
555 555 # collect new branch entries
556 556 newbranches = {}
557 557 for c in ctxgen:
558 558 newbranches.setdefault(c.branch(), []).append(c.node())
559 559 # if older branchheads are reachable from new ones, they aren't
560 560 # really branchheads. Note checking parents is insufficient:
561 561 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
562 562 for branch, newnodes in newbranches.iteritems():
563 563 bheads = partial.setdefault(branch, [])
564 564 bheads.extend(newnodes)
565 565 if len(bheads) <= 1:
566 566 continue
567 567 bheads = sorted(bheads, key=lambda x: self[x].rev())
568 568 # starting from tip means fewer passes over reachable
569 569 while newnodes:
570 570 latest = newnodes.pop()
571 571 if latest not in bheads:
572 572 continue
573 573 minbhrev = self[bheads[0]].node()
574 574 reachable = self.changelog.reachable(latest, minbhrev)
575 575 reachable.remove(latest)
576 576 if reachable:
577 577 bheads = [b for b in bheads if b not in reachable]
578 578 partial[branch] = bheads
579 579
580 580 def lookup(self, key):
581 581 if isinstance(key, int):
582 582 return self.changelog.node(key)
583 583 elif key == '.':
584 584 return self.dirstate.p1()
585 585 elif key == 'null':
586 586 return nullid
587 587 elif key == 'tip':
588 588 return self.changelog.tip()
589 589 n = self.changelog._match(key)
590 590 if n:
591 591 return n
592 592 if key in self._bookmarks:
593 593 return self._bookmarks[key]
594 594 if key in self.tags():
595 595 return self.tags()[key]
596 596 if key in self.branchtags():
597 597 return self.branchtags()[key]
598 598 n = self.changelog._partialmatch(key)
599 599 if n:
600 600 return n
601 601
602 602 # can't find key, check if it might have come from damaged dirstate
603 603 if key in self.dirstate.parents():
604 604 raise error.Abort(_("working directory has unknown parent '%s'!")
605 605 % short(key))
606 606 try:
607 607 if len(key) == 20:
608 608 key = hex(key)
609 609 except TypeError:
610 610 pass
611 611 raise error.RepoLookupError(_("unknown revision '%s'") % key)
612 612
613 613 def lookupbranch(self, key, remote=None):
614 614 repo = remote or self
615 615 if key in repo.branchmap():
616 616 return key
617 617
618 618 repo = (remote and remote.local()) and remote or self
619 619 return repo[key].branch()
620 620
621 621 def known(self, nodes):
622 622 nm = self.changelog.nodemap
623 623 return [(n in nm) for n in nodes]
624 624
625 625 def local(self):
626 626 return self
627 627
628 628 def join(self, f):
629 629 return os.path.join(self.path, f)
630 630
631 631 def wjoin(self, f):
632 632 return os.path.join(self.root, f)
633 633
634 634 def file(self, f):
635 635 if f[0] == '/':
636 636 f = f[1:]
637 637 return filelog.filelog(self.sopener, f)
638 638
639 639 def changectx(self, changeid):
640 640 return self[changeid]
641 641
642 642 def parents(self, changeid=None):
643 643 '''get list of changectxs for parents of changeid'''
644 644 return self[changeid].parents()
645 645
646 646 def filectx(self, path, changeid=None, fileid=None):
647 647 """changeid can be a changeset revision, node, or tag.
648 648 fileid can be a file revision or node."""
649 649 return context.filectx(self, path, changeid, fileid)
650 650
651 651 def getcwd(self):
652 652 return self.dirstate.getcwd()
653 653
654 654 def pathto(self, f, cwd=None):
655 655 return self.dirstate.pathto(f, cwd)
656 656
657 657 def wfile(self, f, mode='r'):
658 658 return self.wopener(f, mode)
659 659
660 660 def _link(self, f):
661 661 return os.path.islink(self.wjoin(f))
662 662
663 663 def _loadfilter(self, filter):
664 664 if filter not in self.filterpats:
665 665 l = []
666 666 for pat, cmd in self.ui.configitems(filter):
667 667 if cmd == '!':
668 668 continue
669 669 mf = matchmod.match(self.root, '', [pat])
670 670 fn = None
671 671 params = cmd
672 672 for name, filterfn in self._datafilters.iteritems():
673 673 if cmd.startswith(name):
674 674 fn = filterfn
675 675 params = cmd[len(name):].lstrip()
676 676 break
677 677 if not fn:
678 678 fn = lambda s, c, **kwargs: util.filter(s, c)
679 679 # Wrap old filters not supporting keyword arguments
680 680 if not inspect.getargspec(fn)[2]:
681 681 oldfn = fn
682 682 fn = lambda s, c, **kwargs: oldfn(s, c)
683 683 l.append((mf, fn, params))
684 684 self.filterpats[filter] = l
685 685 return self.filterpats[filter]
686 686
687 687 def _filter(self, filterpats, filename, data):
688 688 for mf, fn, cmd in filterpats:
689 689 if mf(filename):
690 690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 692 break
693 693
694 694 return data
695 695
696 696 @propertycache
697 697 def _encodefilterpats(self):
698 698 return self._loadfilter('encode')
699 699
700 700 @propertycache
701 701 def _decodefilterpats(self):
702 702 return self._loadfilter('decode')
703 703
704 704 def adddatafilter(self, name, filter):
705 705 self._datafilters[name] = filter
706 706
707 707 def wread(self, filename):
708 708 if self._link(filename):
709 709 data = os.readlink(self.wjoin(filename))
710 710 else:
711 711 data = self.wopener.read(filename)
712 712 return self._filter(self._encodefilterpats, filename, data)
713 713
714 714 def wwrite(self, filename, data, flags):
715 715 data = self._filter(self._decodefilterpats, filename, data)
716 716 if 'l' in flags:
717 717 self.wopener.symlink(data, filename)
718 718 else:
719 719 self.wopener.write(filename, data)
720 720 if 'x' in flags:
721 721 util.setflags(self.wjoin(filename), False, True)
722 722
723 723 def wwritedata(self, filename, data):
724 724 return self._filter(self._decodefilterpats, filename, data)
725 725
726 726 def transaction(self, desc):
727 727 tr = self._transref and self._transref() or None
728 728 if tr and tr.running():
729 729 return tr.nest()
730 730
731 731 # abort here if the journal already exists
732 732 if os.path.exists(self.sjoin("journal")):
733 733 raise error.RepoError(
734 734 _("abandoned transaction found - run hg recover"))
735 735
736 736 journalfiles = self._writejournal(desc)
737 737 renames = [(x, undoname(x)) for x in journalfiles]
738 738
739 739 tr = transaction.transaction(self.ui.warn, self.sopener,
740 740 self.sjoin("journal"),
741 741 aftertrans(renames),
742 742 self.store.createmode)
743 743 self._transref = weakref.ref(tr)
744 744 return tr
745 745
746 746 def _writejournal(self, desc):
747 747 # save dirstate for rollback
748 748 try:
749 749 ds = self.opener.read("dirstate")
750 750 except IOError:
751 751 ds = ""
752 752 self.opener.write("journal.dirstate", ds)
753 753 self.opener.write("journal.branch",
754 754 encoding.fromlocal(self.dirstate.branch()))
755 755 self.opener.write("journal.desc",
756 756 "%d\n%s\n" % (len(self), desc))
757 757
758 758 bkname = self.join('bookmarks')
759 759 if os.path.exists(bkname):
760 760 util.copyfile(bkname, self.join('journal.bookmarks'))
761 761 else:
762 762 self.opener.write('journal.bookmarks', '')
763 763 phasesname = self.sjoin('phaseroots')
764 764 if os.path.exists(phasesname):
765 765 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
766 766 else:
767 767 self.sopener.write('journal.phaseroots', '')
768 768
769 769 return (self.sjoin('journal'), self.join('journal.dirstate'),
770 770 self.join('journal.branch'), self.join('journal.desc'),
771 771 self.join('journal.bookmarks'),
772 772 self.sjoin('journal.phaseroots'))
773 773
774 774 def recover(self):
775 775 lock = self.lock()
776 776 try:
777 777 if os.path.exists(self.sjoin("journal")):
778 778 self.ui.status(_("rolling back interrupted transaction\n"))
779 779 transaction.rollback(self.sopener, self.sjoin("journal"),
780 780 self.ui.warn)
781 781 self.invalidate()
782 782 return True
783 783 else:
784 784 self.ui.warn(_("no interrupted transaction available\n"))
785 785 return False
786 786 finally:
787 787 lock.release()
788 788
789 789 def rollback(self, dryrun=False, force=False):
790 790 wlock = lock = None
791 791 try:
792 792 wlock = self.wlock()
793 793 lock = self.lock()
794 794 if os.path.exists(self.sjoin("undo")):
795 795 return self._rollback(dryrun, force)
796 796 else:
797 797 self.ui.warn(_("no rollback information available\n"))
798 798 return 1
799 799 finally:
800 800 release(lock, wlock)
801 801
802 802 def _rollback(self, dryrun, force):
803 803 ui = self.ui
804 804 try:
805 805 args = self.opener.read('undo.desc').splitlines()
806 806 (oldlen, desc, detail) = (int(args[0]), args[1], None)
807 807 if len(args) >= 3:
808 808 detail = args[2]
809 809 oldtip = oldlen - 1
810 810
811 811 if detail and ui.verbose:
812 812 msg = (_('repository tip rolled back to revision %s'
813 813 ' (undo %s: %s)\n')
814 814 % (oldtip, desc, detail))
815 815 else:
816 816 msg = (_('repository tip rolled back to revision %s'
817 817 ' (undo %s)\n')
818 818 % (oldtip, desc))
819 819 except IOError:
820 820 msg = _('rolling back unknown transaction\n')
821 821 desc = None
822 822
823 823 if not force and self['.'] != self['tip'] and desc == 'commit':
824 824 raise util.Abort(
825 825 _('rollback of last commit while not checked out '
826 826 'may lose data'), hint=_('use -f to force'))
827 827
828 828 ui.status(msg)
829 829 if dryrun:
830 830 return 0
831 831
832 832 parents = self.dirstate.parents()
833 833 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
834 834 if os.path.exists(self.join('undo.bookmarks')):
835 835 util.rename(self.join('undo.bookmarks'),
836 836 self.join('bookmarks'))
837 837 if os.path.exists(self.sjoin('undo.phaseroots')):
838 838 util.rename(self.sjoin('undo.phaseroots'),
839 839 self.sjoin('phaseroots'))
840 840 self.invalidate()
841 841
842 842 parentgone = (parents[0] not in self.changelog.nodemap or
843 843 parents[1] not in self.changelog.nodemap)
844 844 if parentgone:
845 845 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
846 846 try:
847 847 branch = self.opener.read('undo.branch')
848 848 self.dirstate.setbranch(branch)
849 849 except IOError:
850 850 ui.warn(_('named branch could not be reset: '
851 851 'current branch is still \'%s\'\n')
852 852 % self.dirstate.branch())
853 853
854 854 self.dirstate.invalidate()
855 855 self.destroyed()
856 856 parents = tuple([p.rev() for p in self.parents()])
857 857 if len(parents) > 1:
858 858 ui.status(_('working directory now based on '
859 859 'revisions %d and %d\n') % parents)
860 860 else:
861 861 ui.status(_('working directory now based on '
862 862 'revision %d\n') % parents)
863 863 return 0
864 864
865 865 def invalidatecaches(self):
866 866 try:
867 867 delattr(self, '_tagscache')
868 868 except AttributeError:
869 869 pass
870 870
871 871 self._branchcache = None # in UTF-8
872 872 self._branchcachetip = None
873 873
874 874 def invalidatedirstate(self):
875 875 '''Invalidates the dirstate, causing the next call to dirstate
876 876 to check if it was modified since the last time it was read,
877 877 rereading it if it has.
878 878
879 879 This is different from dirstate.invalidate() in that it does not
880 880 always reread the dirstate. Use dirstate.invalidate() if you want to
881 881 explicitly read the dirstate again (i.e. restoring it to a previous
882 882 known good state).'''
883 883 try:
884 884 delattr(self, 'dirstate')
885 885 except AttributeError:
886 886 pass
887 887
888 888 def invalidate(self):
889 889 for k in self._filecache:
890 890 # dirstate is invalidated separately in invalidatedirstate()
891 891 if k == 'dirstate':
892 892 continue
893 893
894 894 try:
895 895 delattr(self, k)
896 896 except AttributeError:
897 897 pass
898 898 self.invalidatecaches()
899 899
900 900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 901 try:
902 902 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 903 except error.LockHeld, inst:
904 904 if not wait:
905 905 raise
906 906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 907 (desc, inst.locker))
908 908 # default to 600 seconds timeout
909 909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 910 releasefn, desc=desc)
911 911 if acquirefn:
912 912 acquirefn()
913 913 return l
914 914
915 915 def lock(self, wait=True):
916 916 '''Lock the repository store (.hg/store) and return a weak reference
917 917 to the lock. Use this before modifying the store (e.g. committing or
918 918 stripping). If you are opening a transaction, get a lock as well.
919 919 l = self._lockref and self._lockref()
920 920 if l is not None and l.held:
921 921 l.lock()
922 922 return l
923 923
924 924 def unlock():
925 925 self.store.write()
926 926 if self._dirtyphases:
927 927 phases.writeroots(self)
928 928 for k, ce in self._filecache.items():
929 929 if k == 'dirstate':
930 930 continue
931 931 ce.refresh()
932 932
933 933 l = self._lock(self.sjoin("lock"), wait, unlock,
934 934 self.invalidate, _('repository %s') % self.origroot)
935 935 self._lockref = weakref.ref(l)
936 936 return l
937 937
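A sketch of the locking pattern the docstring describes, following what recover() and commitctx() do: take the store lock, open a transaction, close it only on success, and release both in finally blocks:

    lock = repo.lock()
    try:
        tr = repo.transaction('sketch')
        try:
            # ... write to the store here ...
            tr.close()      # commit; skipped on error, so release() aborts
        finally:
            tr.release()
    finally:
        lock.release()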
938 938 def wlock(self, wait=True):
939 939 '''Lock the non-store parts of the repository (everything under
940 940 .hg except .hg/store) and return a weak reference to the lock.
941 941 Use this before modifying files in .hg.'''
942 942 l = self._wlockref and self._wlockref()
943 943 if l is not None and l.held:
944 944 l.lock()
945 945 return l
946 946
947 947 def unlock():
948 948 self.dirstate.write()
949 949 ce = self._filecache.get('dirstate')
950 950 if ce:
951 951 ce.refresh()
952 952
953 953 l = self._lock(self.join("wlock"), wait, unlock,
954 954 self.invalidatedirstate, _('working directory of %s') %
955 955 self.origroot)
956 956 self._wlockref = weakref.ref(l)
957 957 return l
958 958
959 959 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
960 960 """
961 961 commit an individual file as part of a larger transaction
962 962 """
963 963
964 964 fname = fctx.path()
965 965 text = fctx.data()
966 966 flog = self.file(fname)
967 967 fparent1 = manifest1.get(fname, nullid)
968 968 fparent2 = fparent2o = manifest2.get(fname, nullid)
969 969
970 970 meta = {}
971 971 copy = fctx.renamed()
972 972 if copy and copy[0] != fname:
973 973 # Mark the new revision of this file as a copy of another
974 974 # file. This copy data will effectively act as a parent
975 975 # of this new revision. If this is a merge, the first
976 976 # parent will be the nullid (meaning "look up the copy data")
977 977 # and the second one will be the other parent. For example:
978 978 #
979 979 # 0 --- 1 --- 3 rev1 changes file foo
980 980 # \ / rev2 renames foo to bar and changes it
981 981 # \- 2 -/ rev3 should have bar with all changes and
982 982 # should record that bar descends from
983 983 # bar in rev2 and foo in rev1
984 984 #
985 985 # this allows this merge to succeed:
986 986 #
987 987 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
988 988 # \ / merging rev3 and rev4 should use bar@rev2
989 989 # \- 2 --- 4 as the merge base
990 990 #
991 991
992 992 cfname = copy[0]
993 993 crev = manifest1.get(cfname)
994 994 newfparent = fparent2
995 995
996 996 if manifest2: # branch merge
997 997 if fparent2 == nullid or crev is None: # copied on remote side
998 998 if cfname in manifest2:
999 999 crev = manifest2[cfname]
1000 1000 newfparent = fparent1
1001 1001
1002 1002 # find source in nearest ancestor if we've lost track
1003 1003 if not crev:
1004 1004 self.ui.debug(" %s: searching for copy revision for %s\n" %
1005 1005 (fname, cfname))
1006 1006 for ancestor in self[None].ancestors():
1007 1007 if cfname in ancestor:
1008 1008 crev = ancestor[cfname].filenode()
1009 1009 break
1010 1010
1011 1011 if crev:
1012 1012 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1013 1013 meta["copy"] = cfname
1014 1014 meta["copyrev"] = hex(crev)
1015 1015 fparent1, fparent2 = nullid, newfparent
1016 1016 else:
1017 1017 self.ui.warn(_("warning: can't find ancestor for '%s' "
1018 1018 "copied from '%s'!\n") % (fname, cfname))
1019 1019
1020 1020 elif fparent2 != nullid:
1021 1021 # is one parent an ancestor of the other?
1022 1022 fparentancestor = flog.ancestor(fparent1, fparent2)
1023 1023 if fparentancestor == fparent1:
1024 1024 fparent1, fparent2 = fparent2, nullid
1025 1025 elif fparentancestor == fparent2:
1026 1026 fparent2 = nullid
1027 1027
1028 1028 # is the file changed?
1029 1029 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1030 1030 changelist.append(fname)
1031 1031 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1032 1032
1033 1033 # are just the flags changed during merge?
1034 1034 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1035 1035 changelist.append(fname)
1036 1036
1037 1037 return fparent1
1038 1038
1039 1039 def commit(self, text="", user=None, date=None, match=None, force=False,
1040 1040 editor=False, extra={}):
1041 1041 """Add a new revision to current repository.
1042 1042
1043 1043 Revision information is gathered from the working directory,
1044 1044 match can be used to filter the committed files. If editor is
1045 1045 supplied, it is called to get a commit message.
1046 1046 """
1047 1047
1048 1048 def fail(f, msg):
1049 1049 raise util.Abort('%s: %s' % (f, msg))
1050 1050
1051 1051 if not match:
1052 1052 match = matchmod.always(self.root, '')
1053 1053
1054 1054 if not force:
1055 1055 vdirs = []
1056 1056 match.dir = vdirs.append
1057 1057 match.bad = fail
1058 1058
1059 1059 wlock = self.wlock()
1060 1060 try:
1061 1061 wctx = self[None]
1062 1062 merge = len(wctx.parents()) > 1
1063 1063
1064 1064 if (not force and merge and match and
1065 1065 (match.files() or match.anypats())):
1066 1066 raise util.Abort(_('cannot partially commit a merge '
1067 1067 '(do not specify files or patterns)'))
1068 1068
1069 1069 changes = self.status(match=match, clean=force)
1070 1070 if force:
1071 1071 changes[0].extend(changes[6]) # mq may commit unchanged files
1072 1072
1073 1073 # check subrepos
1074 1074 subs = []
1075 1075 removedsubs = set()
1076 1076 if '.hgsub' in wctx:
1077 1077 # only manage subrepos and .hgsubstate if .hgsub is present
1078 1078 for p in wctx.parents():
1079 1079 removedsubs.update(s for s in p.substate if match(s))
1080 1080 for s in wctx.substate:
1081 1081 removedsubs.discard(s)
1082 1082 if match(s) and wctx.sub(s).dirty():
1083 1083 subs.append(s)
1084 1084 if (subs or removedsubs):
1085 1085 if (not match('.hgsub') and
1086 1086 '.hgsub' in (wctx.modified() + wctx.added())):
1087 1087 raise util.Abort(
1088 1088 _("can't commit subrepos without .hgsub"))
1089 1089 if '.hgsubstate' not in changes[0]:
1090 1090 changes[0].insert(0, '.hgsubstate')
1091 1091 if '.hgsubstate' in changes[2]:
1092 1092 changes[2].remove('.hgsubstate')
1093 1093 elif '.hgsub' in changes[2]:
1094 1094 # clean up .hgsubstate when .hgsub is removed
1095 1095 if ('.hgsubstate' in wctx and
1096 1096 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1097 1097 changes[2].insert(0, '.hgsubstate')
1098 1098
1099 1099 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1100 1100 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1101 1101 if changedsubs:
1102 1102 raise util.Abort(_("uncommitted changes in subrepo %s")
1103 1103 % changedsubs[0],
1104 1104 hint=_("use --subrepos for recursive commit"))
1105 1105
1106 1106 # make sure all explicit patterns are matched
1107 1107 if not force and match.files():
1108 1108 matched = set(changes[0] + changes[1] + changes[2])
1109 1109
1110 1110 for f in match.files():
1111 1111 if f == '.' or f in matched or f in wctx.substate:
1112 1112 continue
1113 1113 if f in changes[3]: # missing
1114 1114 fail(f, _('file not found!'))
1115 1115 if f in vdirs: # visited directory
1116 1116 d = f + '/'
1117 1117 for mf in matched:
1118 1118 if mf.startswith(d):
1119 1119 break
1120 1120 else:
1121 1121 fail(f, _("no match under directory!"))
1122 1122 elif f not in self.dirstate:
1123 1123 fail(f, _("file not tracked!"))
1124 1124
1125 1125 if (not force and not extra.get("close") and not merge
1126 1126 and not (changes[0] or changes[1] or changes[2])
1127 1127 and wctx.branch() == wctx.p1().branch()):
1128 1128 return None
1129 1129
1130 1130 ms = mergemod.mergestate(self)
1131 1131 for f in changes[0]:
1132 1132 if f in ms and ms[f] == 'u':
1133 1133 raise util.Abort(_("unresolved merge conflicts "
1134 1134 "(see hg help resolve)"))
1135 1135
1136 1136 cctx = context.workingctx(self, text, user, date, extra, changes)
1137 1137 if editor:
1138 1138 cctx._text = editor(self, cctx, subs)
1139 1139 edited = (text != cctx._text)
1140 1140
1141 1141 # commit subs
1142 1142 if subs or removedsubs:
1143 1143 state = wctx.substate.copy()
1144 1144 for s in sorted(subs):
1145 1145 sub = wctx.sub(s)
1146 1146 self.ui.status(_('committing subrepository %s\n') %
1147 1147 subrepo.subrelpath(sub))
1148 1148 sr = sub.commit(cctx._text, user, date)
1149 1149 state[s] = (state[s][0], sr)
1150 1150 subrepo.writestate(self, state)
1151 1151
1152 1152 # Save commit message in case this transaction gets rolled back
1153 1153 # (e.g. by a pretxncommit hook). Leave the content alone on
1154 1154 # the assumption that the user will use the same editor again.
1155 1155 msgfn = self.savecommitmessage(cctx._text)
1156 1156
1157 1157 p1, p2 = self.dirstate.parents()
1158 1158 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1159 1159 try:
1160 1160 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1161 1161 ret = self.commitctx(cctx, True)
1162 1162 except:
1163 1163 if edited:
1164 1164 self.ui.write(
1165 1165 _('note: commit message saved in %s\n') % msgfn)
1166 1166 raise
1167 1167
1168 1168 # update bookmarks, dirstate and mergestate
1169 1169 bookmarks.update(self, p1, ret)
1170 1170 for f in changes[0] + changes[1]:
1171 1171 self.dirstate.normal(f)
1172 1172 for f in changes[2]:
1173 1173 self.dirstate.drop(f)
1174 1174 self.dirstate.setparents(ret)
1175 1175 ms.reset()
1176 1176 finally:
1177 1177 wlock.release()
1178 1178
1179 1179 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1180 1180 return ret
1181 1181
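A usage sketch for commit(), with hypothetical values: it returns the new changelog node, or None when there is nothing to commit:

    node = repo.commit(text='fix the frobnicator', user='alice')
    if node is None:
        repo.ui.status('nothing changed\n')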
1182 1182 def commitctx(self, ctx, error=False):
1183 1183 """Add a new revision to current repository.
1184 1184 Revision information is passed via the context argument.
1185 1185 """
1186 1186
1187 1187 tr = lock = None
1188 1188 removed = list(ctx.removed())
1189 1189 p1, p2 = ctx.p1(), ctx.p2()
1190 1190 user = ctx.user()
1191 1191
1192 1192 lock = self.lock()
1193 1193 try:
1194 1194 tr = self.transaction("commit")
1195 1195 trp = weakref.proxy(tr)
1196 1196
1197 1197 if ctx.files():
1198 1198 m1 = p1.manifest().copy()
1199 1199 m2 = p2.manifest()
1200 1200
1201 1201 # check in files
1202 1202 new = {}
1203 1203 changed = []
1204 1204 linkrev = len(self)
1205 1205 for f in sorted(ctx.modified() + ctx.added()):
1206 1206 self.ui.note(f + "\n")
1207 1207 try:
1208 1208 fctx = ctx[f]
1209 1209 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1210 1210 changed)
1211 1211 m1.set(f, fctx.flags())
1212 1212 except OSError, inst:
1213 1213 self.ui.warn(_("trouble committing %s!\n") % f)
1214 1214 raise
1215 1215 except IOError, inst:
1216 1216 errcode = getattr(inst, 'errno', errno.ENOENT)
1217 1217 if error or errcode and errcode != errno.ENOENT:
1218 1218 self.ui.warn(_("trouble committing %s!\n") % f)
1219 1219 raise
1220 1220 else:
1221 1221 removed.append(f)
1222 1222
1223 1223 # update manifest
1224 1224 m1.update(new)
1225 1225 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1226 1226 drop = [f for f in removed if f in m1]
1227 1227 for f in drop:
1228 1228 del m1[f]
1229 1229 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1230 1230 p2.manifestnode(), (new, drop))
1231 1231 files = changed + removed
1232 1232 else:
1233 1233 mn = p1.manifestnode()
1234 1234 files = []
1235 1235
1236 1236 # update changelog
1237 1237 self.changelog.delayupdate()
1238 1238 n = self.changelog.add(mn, files, ctx.description(),
1239 1239 trp, p1.node(), p2.node(),
1240 1240 user, ctx.date(), ctx.extra().copy())
1241 1241 p = lambda: self.changelog.writepending() and self.root or ""
1242 1242 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1243 1243 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1244 1244 parent2=xp2, pending=p)
1245 1245 self.changelog.finalize(trp)
1246 # ensure the new commit is 1-phase
1247 phases.retractboundary(self, 1, [n])
1246 1248 tr.close()
1247 1249
1248 1250 if self._branchcache:
1249 1251 self.updatebranchcache()
1250 1252 return n
1251 1253 finally:
1252 1254 if tr:
1253 1255 tr.release()
1254 1256 lock.release()
1255 1257
1256 1258 def destroyed(self):
1257 1259 '''Inform the repository that nodes have been destroyed.
1258 1260 Intended for use by strip and rollback, so there's a common
1259 1261 place for anything that has to be done after destroying history.'''
1260 1262 # XXX it might be nice if we could take the list of destroyed
1261 1263 # nodes, but I don't see an easy way for rollback() to do that
1262 1264
1263 1265 # Ensure the persistent tag cache is updated. Doing it now
1264 1266 # means that the tag cache only has to worry about destroyed
1265 1267 # heads immediately after a strip/rollback. That in turn
1266 1268 # guarantees that "cachetip == currenttip" (comparing both rev
1267 1269 # and node) always means no nodes have been added or destroyed.
1268 1270
1269 1271 # XXX this is suboptimal when qrefresh'ing: we strip the current
1270 1272 # head, refresh the tag cache, then immediately add a new head.
1271 1273 # But I think doing it this way is necessary for the "instant
1272 1274 # tag cache retrieval" case to work.
1273 1275 self.invalidatecaches()
1274 1276
1275 1277 def walk(self, match, node=None):
1276 1278 '''
1277 1279 walk recursively through the directory tree or a given
1278 1280 changeset, finding all files matched by the match
1279 1281 function
1280 1282 '''
1281 1283 return self[node].walk(match)
1282 1284
1283 1285 def status(self, node1='.', node2=None, match=None,
1284 1286 ignored=False, clean=False, unknown=False,
1285 1287 listsubrepos=False):
1286 1288 """return status of files between two nodes or node and working directory
1287 1289
1288 1290 If node1 is None, use the first dirstate parent instead.
1289 1291 If node2 is None, compare node1 with working directory.
1290 1292 """
1291 1293
1292 1294 def mfmatches(ctx):
1293 1295 mf = ctx.manifest().copy()
1294 1296 for fn in mf.keys():
1295 1297 if not match(fn):
1296 1298 del mf[fn]
1297 1299 return mf
1298 1300
1299 1301 if isinstance(node1, context.changectx):
1300 1302 ctx1 = node1
1301 1303 else:
1302 1304 ctx1 = self[node1]
1303 1305 if isinstance(node2, context.changectx):
1304 1306 ctx2 = node2
1305 1307 else:
1306 1308 ctx2 = self[node2]
1307 1309
1308 1310 working = ctx2.rev() is None
1309 1311 parentworking = working and ctx1 == self['.']
1310 1312 match = match or matchmod.always(self.root, self.getcwd())
1311 1313 listignored, listclean, listunknown = ignored, clean, unknown
1312 1314
1313 1315 # load earliest manifest first for caching reasons
1314 1316 if not working and ctx2.rev() < ctx1.rev():
1315 1317 ctx2.manifest()
1316 1318
1317 1319 if not parentworking:
1318 1320 def bad(f, msg):
1319 1321 if f not in ctx1:
1320 1322 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1321 1323 match.bad = bad
1322 1324
1323 1325 if working: # we need to scan the working dir
1324 1326 subrepos = []
1325 1327 if '.hgsub' in self.dirstate:
1326 1328 subrepos = ctx2.substate.keys()
1327 1329 s = self.dirstate.status(match, subrepos, listignored,
1328 1330 listclean, listunknown)
1329 1331 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1330 1332
1331 1333 # check for any possibly clean files
1332 1334 if parentworking and cmp:
1333 1335 fixup = []
1334 1336 # do a full compare of any files that might have changed
1335 1337 for f in sorted(cmp):
1336 1338 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1337 1339 or ctx1[f].cmp(ctx2[f])):
1338 1340 modified.append(f)
1339 1341 else:
1340 1342 fixup.append(f)
1341 1343
1342 1344 # update dirstate for files that are actually clean
1343 1345 if fixup:
1344 1346 if listclean:
1345 1347 clean += fixup
1346 1348
1347 1349 try:
1348 1350 # updating the dirstate is optional
1349 1351 # so we don't wait on the lock
1350 1352 wlock = self.wlock(False)
1351 1353 try:
1352 1354 for f in fixup:
1353 1355 self.dirstate.normal(f)
1354 1356 finally:
1355 1357 wlock.release()
1356 1358 except error.LockError:
1357 1359 pass
1358 1360
1359 1361 if not parentworking:
1360 1362 mf1 = mfmatches(ctx1)
1361 1363 if working:
1362 1364 # we are comparing working dir against non-parent
1363 1365 # generate a pseudo-manifest for the working dir
1364 1366 mf2 = mfmatches(self['.'])
1365 1367 for f in cmp + modified + added:
1366 1368 mf2[f] = None
1367 1369 mf2.set(f, ctx2.flags(f))
1368 1370 for f in removed:
1369 1371 if f in mf2:
1370 1372 del mf2[f]
1371 1373 else:
1372 1374 # we are comparing two revisions
1373 1375 deleted, unknown, ignored = [], [], []
1374 1376 mf2 = mfmatches(ctx2)
1375 1377
1376 1378 modified, added, clean = [], [], []
1377 1379 for fn in mf2:
1378 1380 if fn in mf1:
1379 1381 if (fn not in deleted and
1380 1382 (mf1.flags(fn) != mf2.flags(fn) or
1381 1383 (mf1[fn] != mf2[fn] and
1382 1384 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1383 1385 modified.append(fn)
1384 1386 elif listclean:
1385 1387 clean.append(fn)
1386 1388 del mf1[fn]
1387 1389 elif fn not in deleted:
1388 1390 added.append(fn)
1389 1391 removed = mf1.keys()
1390 1392
1391 1393 if working and modified and not self.dirstate._checklink:
1392 1394 # Symlink placeholders may get non-symlink-like contents
1393 1395 # via user error or dereferencing by NFS or Samba servers,
1394 1396 # so we filter out any placeholders that don't look like a
1395 1397 # symlink
1396 1398 sane = []
1397 1399 for f in modified:
1398 1400 if ctx2.flags(f) == 'l':
1399 1401 d = ctx2[f].data()
1400 1402 if len(d) >= 1024 or '\n' in d or util.binary(d):
1401 1403 self.ui.debug('ignoring suspect symlink placeholder'
1402 1404 ' "%s"\n' % f)
1403 1405 continue
1404 1406 sane.append(f)
1405 1407 modified = sane
1406 1408
1407 1409 r = modified, added, removed, deleted, unknown, ignored, clean
1408 1410
1409 1411 if listsubrepos:
1410 1412 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1411 1413 if working:
1412 1414 rev2 = None
1413 1415 else:
1414 1416 rev2 = ctx2.substate[subpath][1]
1415 1417 try:
1416 1418 submatch = matchmod.narrowmatcher(subpath, match)
1417 1419 s = sub.status(rev2, match=submatch, ignored=listignored,
1418 1420 clean=listclean, unknown=listunknown,
1419 1421 listsubrepos=True)
1420 1422 for rfiles, sfiles in zip(r, s):
1421 1423 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1422 1424 except error.LookupError:
1423 1425 self.ui.status(_("skipping missing subrepository: %s\n")
1424 1426 % subpath)
1425 1427
1426 1428 for l in r:
1427 1429 l.sort()
1428 1430 return r
1429 1431
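A usage sketch for status(): it returns seven lists in a fixed order, and the ignored/clean/unknown lists stay empty unless explicitly requested:

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, clean=True, unknown=True)
    for f in modified:
        repo.ui.write('M %s\n' % f)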
1430 1432 def heads(self, start=None):
1431 1433 heads = self.changelog.heads(start)
1432 1434 # sort the output in rev descending order
1433 1435 return sorted(heads, key=self.changelog.rev, reverse=True)
1434 1436
1435 1437 def branchheads(self, branch=None, start=None, closed=False):
1436 1438 '''return a (possibly filtered) list of heads for the given branch
1437 1439
1438 1440 Heads are returned in topological order, from newest to oldest.
1439 1441 If branch is None, use the dirstate branch.
1440 1442 If start is not None, return only heads reachable from start.
1441 1443 If closed is True, return heads that are marked as closed as well.
1442 1444 '''
1443 1445 if branch is None:
1444 1446 branch = self[None].branch()
1445 1447 branches = self.branchmap()
1446 1448 if branch not in branches:
1447 1449 return []
1448 1450 # the cache returns heads ordered lowest to highest
1449 1451 bheads = list(reversed(branches[branch]))
1450 1452 if start is not None:
1451 1453 # filter out the heads that cannot be reached from startrev
1452 1454 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1453 1455 bheads = [h for h in bheads if h in fbheads]
1454 1456 if not closed:
1455 1457 bheads = [h for h in bheads if
1456 1458 ('close' not in self.changelog.read(h)[5])]
1457 1459 return bheads
1458 1460
1459 1461 def branches(self, nodes):
1460 1462 if not nodes:
1461 1463 nodes = [self.changelog.tip()]
1462 1464 b = []
1463 1465 for n in nodes:
1464 1466 t = n
1465 1467 while True:
1466 1468 p = self.changelog.parents(n)
1467 1469 if p[1] != nullid or p[0] == nullid:
1468 1470 b.append((t, n, p[0], p[1]))
1469 1471 break
1470 1472 n = p[0]
1471 1473 return b
1472 1474
1473 1475 def between(self, pairs):
1474 1476 r = []
1475 1477
1476 1478 for top, bottom in pairs:
1477 1479 n, l, i = top, [], 0
1478 1480 f = 1
1479 1481
1480 1482 while n != bottom and n != nullid:
1481 1483 p = self.changelog.parents(n)[0]
1482 1484 if i == f:
1483 1485 l.append(n)
1484 1486 f = f * 2
1485 1487 n = p
1486 1488 i += 1
1487 1489
1488 1490 r.append(l)
1489 1491
1490 1492 return r
1491 1493
1492 1494 def pull(self, remote, heads=None, force=False):
1493 1495 lock = self.lock()
1494 1496 try:
1495 1497 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1496 1498 force=force)
1497 1499 common, fetch, rheads = tmp
1498 1500 if not fetch:
1499 1501 self.ui.status(_("no changes found\n"))
1500 1502 result = 0
1501 1503 else:
1502 1504 if heads is None and list(common) == [nullid]:
1503 1505 self.ui.status(_("requesting all changes\n"))
1504 1506 elif heads is None and remote.capable('changegroupsubset'):
1505 1507 # issue1320, avoid a race if remote changed after discovery
1506 1508 heads = rheads
1507 1509
1508 1510 if remote.capable('getbundle'):
1509 1511 cg = remote.getbundle('pull', common=common,
1510 1512 heads=heads or rheads)
1511 1513 elif heads is None:
1512 1514 cg = remote.changegroup(fetch, 'pull')
1513 1515 elif not remote.capable('changegroupsubset'):
1514 1516 raise util.Abort(_("partial pull cannot be done because "
1515 1517 "other repository doesn't support "
1516 1518 "changegroupsubset."))
1517 1519 else:
1518 1520 cg = remote.changegroupsubset(fetch, heads, 'pull')
1519 1521 result = self.addchangegroup(cg, 'pull', remote.url(),
1520 1522 lock=lock)
1521 1523 finally:
1522 1524 lock.release()
1523 1525
1524 1526 return result
1525 1527
1526 1528 def checkpush(self, force, revs):
1527 1529 """Extensions can override this function if additional checks have
1528 1530 to be performed before pushing, or call it if they override push
1529 1531 command.
1530 1532 """
1531 1533 pass
1532 1534
1533 1535 def push(self, remote, force=False, revs=None, newbranch=False):
1534 1536 '''Push outgoing changesets (limited by revs) from the current
1535 1537 repository to remote. Return an integer:
1536 1538 - 0 means HTTP error *or* nothing to push
1537 1539 - 1 means we pushed and remote head count is unchanged *or*
1538 1540 we have outgoing changesets but refused to push
1539 1541 - other values as described by addchangegroup()
1540 1542 '''
1541 1543 # there are two ways to push to remote repo:
1542 1544 #
1543 1545 # addchangegroup assumes local user can lock remote
1544 1546 # repo (local filesystem, old ssh servers).
1545 1547 #
1546 1548 # unbundle assumes local user cannot lock remote repo (new ssh
1547 1549 # servers, http servers).
1548 1550
1549 1551 self.checkpush(force, revs)
1550 1552 lock = None
1551 1553 unbundle = remote.capable('unbundle')
1552 1554 if not unbundle:
1553 1555 lock = remote.lock()
1554 1556 try:
1555 1557 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1556 1558 newbranch)
1557 1559 ret = remote_heads
1558 1560 if cg is not None:
1559 1561 if unbundle:
1560 1562 # local repo finds heads on server, finds out what
1561 1563 # revs it must push. once revs transferred, if server
1562 1564 # finds it has different heads (someone else won
1563 1565 # commit/push race), server aborts.
1564 1566 if force:
1565 1567 remote_heads = ['force']
1566 1568 # ssh: return remote's addchangegroup()
1567 1569 # http: return remote's addchangegroup() or 0 for error
1568 1570 ret = remote.unbundle(cg, remote_heads, 'push')
1569 1571 else:
1570 1572 # we return an integer indicating remote head count change
1571 1573 ret = remote.addchangegroup(cg, 'push', self.url(),
1572 1574 lock=lock)
1573 1575 finally:
1574 1576 if lock is not None:
1575 1577 lock.release()
1576 1578
1577 1579 self.ui.debug("checking for updated bookmarks\n")
1578 1580 rb = remote.listkeys('bookmarks')
1579 1581 for k in rb.keys():
1580 1582 if k in self._bookmarks:
1581 1583 nr, nl = rb[k], hex(self._bookmarks[k])
1582 1584 if nr in self:
1583 1585 cr = self[nr]
1584 1586 cl = self[nl]
1585 1587 if cl in cr.descendants():
1586 1588 r = remote.pushkey('bookmarks', k, nr, nl)
1587 1589 if r:
1588 1590 self.ui.status(_("updating bookmark %s\n") % k)
1589 1591 else:
1590 1592 self.ui.warn(_('updating bookmark %s'
1591 1593 ' failed!\n') % k)
1592 1594
1593 1595 return ret
1594 1596
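A hedged usage sketch for push(), assuming `remote` is an opened peer repository; the return codes are the ones the docstring above lists:

    ret = repo.push(remote, force=False, revs=None, newbranch=False)
    if ret == 0:
        repo.ui.warn('push failed or nothing to push\n')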
1595 1597 def changegroupinfo(self, nodes, source):
1596 1598 if self.ui.verbose or source == 'bundle':
1597 1599 self.ui.status(_("%d changesets found\n") % len(nodes))
1598 1600 if self.ui.debugflag:
1599 1601 self.ui.debug("list of changesets:\n")
1600 1602 for node in nodes:
1601 1603 self.ui.debug("%s\n" % hex(node))
1602 1604
1603 1605 def changegroupsubset(self, bases, heads, source):
1604 1606 """Compute a changegroup consisting of all the nodes that are
1605 1607 descendants of any of the bases and ancestors of any of the heads.
1606 1608 Return a chunkbuffer object whose read() method will return
1607 1609 successive changegroup chunks.
1608 1610
1609 1611 This is fairly complex: determining which filenodes and which
1610 1612 manifest nodes need to be included for each changeset to be
1611 1613 complete is non-trivial.
1612 1614
1613 1615 Another wrinkle is doing the reverse, figuring out which changeset in
1614 1616 the changegroup a particular filenode or manifestnode belongs to.
1615 1617 """
1616 1618 cl = self.changelog
1617 1619 if not bases:
1618 1620 bases = [nullid]
1619 1621 csets, bases, heads = cl.nodesbetween(bases, heads)
1620 1622 # We assume that all ancestors of bases are known
1621 1623 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1622 1624 return self._changegroupsubset(common, csets, heads, source)
1623 1625
1624 1626 def getbundle(self, source, heads=None, common=None):
1625 1627 """Like changegroupsubset, but returns the set difference between the
1626 1628 ancestors of heads and the ancestors of common.
1627 1629
1628 1630 If heads is None, use the local heads. If common is None, use [nullid].
1629 1631
1630 1632 The nodes in common might not all be known locally due to the way the
1631 1633 current discovery protocol works.
1632 1634 """
1633 1635 cl = self.changelog
1634 1636 if common:
1635 1637 nm = cl.nodemap
1636 1638 common = [n for n in common if n in nm]
1637 1639 else:
1638 1640 common = [nullid]
1639 1641 if not heads:
1640 1642 heads = cl.heads()
1641 1643 common, missing = cl.findcommonmissing(common, heads)
1642 1644 if not missing:
1643 1645 return None
1644 1646 return self._changegroupsubset(common, missing, heads, source)
1645 1647
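In set terms, getbundle selects ancestors(heads) - ancestors(common). A rough rev-level sketch of that difference, reusing the changelog.ancestors() call seen in changegroupsubset above (illustrative only; the real computation happens in changelog.findcommonmissing):

    # Illustrative set algebra for getbundle's selection; missingrevs is
    # a made-up name, not part of this module.
    def missingrevs(cl, common, heads):
        have = set(cl.ancestors(*[cl.rev(n) for n in common]))
        have.update(cl.rev(n) for n in common)
        want = set(cl.ancestors(*[cl.rev(n) for n in heads]))
        want.update(cl.rev(n) for n in heads)
        return sorted(want - have)  # revs the recipient is missing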
1646 1648 def _changegroupsubset(self, commonrevs, csets, heads, source):
1647 1649
1648 1650 cl = self.changelog
1649 1651 mf = self.manifest
1650 1652 mfs = {} # needed manifests
1651 1653 fnodes = {} # needed file nodes
1652 1654 changedfiles = set()
1653 1655 fstate = ['', {}]
1654 1656 count = [0]
1655 1657
1656 1658 # can we go through the fast path?
1657 1659 heads.sort()
1658 1660 if heads == sorted(self.heads()):
1659 1661 return self._changegroup(csets, source)
1660 1662
1661 1663 # slow path
1662 1664 self.hook('preoutgoing', throw=True, source=source)
1663 1665 self.changegroupinfo(csets, source)
1664 1666
1665 1667 # filter any nodes that claim to be part of the known set
1666 1668 def prune(revlog, missing):
1667 1669 return [n for n in missing
1668 1670 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1669 1671
1670 1672 def lookup(revlog, x):
1671 1673 if revlog == cl:
1672 1674 c = cl.read(x)
1673 1675 changedfiles.update(c[3])
1674 1676 mfs.setdefault(c[0], x)
1675 1677 count[0] += 1
1676 1678 self.ui.progress(_('bundling'), count[0],
1677 1679 unit=_('changesets'), total=len(csets))
1678 1680 return x
1679 1681 elif revlog == mf:
1680 1682 clnode = mfs[x]
1681 1683 mdata = mf.readfast(x)
1682 1684 for f in changedfiles:
1683 1685 if f in mdata:
1684 1686 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1685 1687 count[0] += 1
1686 1688 self.ui.progress(_('bundling'), count[0],
1687 1689 unit=_('manifests'), total=len(mfs))
1688 1690 return mfs[x]
1689 1691 else:
1690 1692 self.ui.progress(
1691 1693 _('bundling'), count[0], item=fstate[0],
1692 1694 unit=_('files'), total=len(changedfiles))
1693 1695 return fstate[1][x]
1694 1696
1695 1697 bundler = changegroup.bundle10(lookup)
1696 1698 reorder = self.ui.config('bundle', 'reorder', 'auto')
1697 1699 if reorder == 'auto':
1698 1700 reorder = None
1699 1701 else:
1700 1702 reorder = util.parsebool(reorder)
1701 1703
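The bundle.reorder knob read here accepts 'auto' (passed down as None, letting each revlog decide) or any boolean string understood by util.parsebool. In hgrc terms, this sketch assumes the defaults in the code above:

    [bundle]
    # auto (default): reorder only where the revlog deems it worthwhile
    # true / false:   force reordering on or off for every revlog
    reorder = auto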
1702 1704 def gengroup():
1703 1705 # Create a changenode group generator that will call our functions
1704 1706 # back to look up the owning changenode and collect information.
1705 1707 for chunk in cl.group(csets, bundler, reorder=reorder):
1706 1708 yield chunk
1707 1709 self.ui.progress(_('bundling'), None)
1708 1710
1709 1711 # Create a generator for the manifestnodes that calls our lookup
1710 1712 # and data collection functions back.
1711 1713 count[0] = 0
1712 1714 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1713 1715 yield chunk
1714 1716 self.ui.progress(_('bundling'), None)
1715 1717
1716 1718 mfs.clear()
1717 1719
1718 1720 # Go through all our files in order sorted by name.
1719 1721 count[0] = 0
1720 1722 for fname in sorted(changedfiles):
1721 1723 filerevlog = self.file(fname)
1722 1724 if not len(filerevlog):
1723 1725 raise util.Abort(_("empty or missing revlog for %s") % fname)
1724 1726 fstate[0] = fname
1725 1727 fstate[1] = fnodes.pop(fname, {})
1726 1728
1727 1729 nodelist = prune(filerevlog, fstate[1])
1728 1730 if nodelist:
1729 1731 count[0] += 1
1730 1732 yield bundler.fileheader(fname)
1731 1733 for chunk in filerevlog.group(nodelist, bundler, reorder):
1732 1734 yield chunk
1733 1735
1734 1736 # Signal that no more groups are left.
1735 1737 yield bundler.close()
1736 1738 self.ui.progress(_('bundling'), None)
1737 1739
1738 1740 if csets:
1739 1741 self.hook('outgoing', node=hex(csets[0]), source=source)
1740 1742
1741 1743 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1742 1744
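For orientation, the chunk stream assembled by gengroup() above has the following shape; framing details such as length prefixes live in changegroup.bundle10, so this is a schematic rather than a byte-accurate dump:

    changelog group         chunks for every changeset in csets
    manifest group          chunks for every needed manifest
    per changed file:
        file header         bundler.fileheader(fname)
        filelog group       chunks for the needed file revisions
    closing chunk           bundler.close()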
1743 1745 def changegroup(self, basenodes, source):
1744 1746 # to avoid a race we use changegroupsubset() (issue1320)
1745 1747 return self.changegroupsubset(basenodes, self.heads(), source)
1746 1748
1747 1749 def _changegroup(self, nodes, source):
1748 1750 """Compute the changegroup of all nodes that we have that a recipient
1749 1751 doesn't. Return a chunkbuffer object whose read() method will return
1750 1752 successive changegroup chunks.
1751 1753
1752 1754 This is much easier than the previous function as we can assume that
1753 1755 the recipient has any changenode we aren't sending them.
1754 1756
1755 1757 nodes is the set of nodes to send"""
1756 1758
1757 1759 cl = self.changelog
1758 1760 mf = self.manifest
1759 1761 mfs = {}
1760 1762 changedfiles = set()
1761 1763 fstate = ['']
1762 1764 count = [0]
1763 1765
1764 1766 self.hook('preoutgoing', throw=True, source=source)
1765 1767 self.changegroupinfo(nodes, source)
1766 1768
1767 1769 revset = set([cl.rev(n) for n in nodes])
1768 1770
1769 1771 def gennodelst(log):
1770 1772 return [log.node(r) for r in log if log.linkrev(r) in revset]
1771 1773
1772 1774 def lookup(revlog, x):
1773 1775 if revlog == cl:
1774 1776 c = cl.read(x)
1775 1777 changedfiles.update(c[3])
1776 1778 mfs.setdefault(c[0], x)
1777 1779 count[0] += 1
1778 1780 self.ui.progress(_('bundling'), count[0],
1779 1781 unit=_('changesets'), total=len(nodes))
1780 1782 return x
1781 1783 elif revlog == mf:
1782 1784 count[0] += 1
1783 1785 self.ui.progress(_('bundling'), count[0],
1784 1786 unit=_('manifests'), total=len(mfs))
1785 1787 return cl.node(revlog.linkrev(revlog.rev(x)))
1786 1788 else:
1787 1789 self.ui.progress(
1788 1790 _('bundling'), count[0], item=fstate[0],
1789 1791 total=len(changedfiles), unit=_('files'))
1790 1792 return cl.node(revlog.linkrev(revlog.rev(x)))
1791 1793
1792 1794 bundler = changegroup.bundle10(lookup)
1793 1795 reorder = self.ui.config('bundle', 'reorder', 'auto')
1794 1796 if reorder == 'auto':
1795 1797 reorder = None
1796 1798 else:
1797 1799 reorder = util.parsebool(reorder)
1798 1800
1799 1801 def gengroup():
1800 1802 '''yield a sequence of changegroup chunks (strings)'''
1801 1803 # construct a list of all changed files
1802 1804
1803 1805 for chunk in cl.group(nodes, bundler, reorder=reorder):
1804 1806 yield chunk
1805 1807 self.ui.progress(_('bundling'), None)
1806 1808
1807 1809 count[0] = 0
1808 1810 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1809 1811 yield chunk
1810 1812 self.ui.progress(_('bundling'), None)
1811 1813
1812 1814 count[0] = 0
1813 1815 for fname in sorted(changedfiles):
1814 1816 filerevlog = self.file(fname)
1815 1817 if not len(filerevlog):
1816 1818 raise util.Abort(_("empty or missing revlog for %s") % fname)
1817 1819 fstate[0] = fname
1818 1820 nodelist = gennodelst(filerevlog)
1819 1821 if nodelist:
1820 1822 count[0] += 1
1821 1823 yield bundler.fileheader(fname)
1822 1824 for chunk in filerevlog.group(nodelist, bundler, reorder):
1823 1825 yield chunk
1824 1826 yield bundler.close()
1825 1827 self.ui.progress(_('bundling'), None)
1826 1828
1827 1829 if nodes:
1828 1830 self.hook('outgoing', node=hex(nodes[0]), source=source)
1829 1831
1830 1832 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1831 1833
1832 1834 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1833 1835 """Add the changegroup returned by source.read() to this repo.
1834 1836 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1835 1837 the URL of the repo where this changegroup is coming from.
1836 1838 If lock is not None, the function takes ownership of the lock
1837 1839 and releases it after the changegroup is added.
1838 1840
1839 1841 Return an integer summarizing the change to this repo:
1840 1842 - nothing changed or no source: 0
1841 1843 - more heads than before: 1+added heads (2..n)
1842 1844 - fewer heads than before: -1-removed heads (-2..-n)
1843 1845 - number of heads stays the same: 1
1844 1846 """
1845 1847 def csmap(x):
1846 1848 self.ui.debug("add changeset %s\n" % short(x))
1847 1849 return len(cl)
1848 1850
1849 1851 def revmap(x):
1850 1852 return cl.rev(x)
1851 1853
1852 1854 if not source:
1853 1855 return 0
1854 1856
1855 1857 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1856 1858
1857 1859 changesets = files = revisions = 0
1858 1860 efiles = set()
1859 1861
1860 1862 # write changelog data to temp files so concurrent readers will not see
1861 1863 # an inconsistent view
1862 1864 cl = self.changelog
1863 1865 cl.delayupdate()
1864 1866 oldheads = cl.heads()
1865 1867
1866 1868 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1867 1869 try:
1868 1870 trp = weakref.proxy(tr)
1869 1871 # pull off the changeset group
1870 1872 self.ui.status(_("adding changesets\n"))
1871 1873 clstart = len(cl)
1872 1874 class prog(object):
1873 1875 step = _('changesets')
1874 1876 count = 1
1875 1877 ui = self.ui
1876 1878 total = None
1877 1879 def __call__(self):
1878 1880 self.ui.progress(self.step, self.count, unit=_('chunks'),
1879 1881 total=self.total)
1880 1882 self.count += 1
1881 1883 pr = prog()
1882 1884 source.callback = pr
1883 1885
1884 1886 source.changelogheader()
1885 1887 if (cl.addgroup(source, csmap, trp) is None
1886 1888 and not emptyok):
1887 1889 raise util.Abort(_("received changelog group is empty"))
1888 1890 clend = len(cl)
1889 1891 changesets = clend - clstart
1890 1892 for c in xrange(clstart, clend):
1891 1893 efiles.update(self[c].files())
1892 1894 efiles = len(efiles)
1893 1895 self.ui.progress(_('changesets'), None)
1894 1896
1895 1897 # pull off the manifest group
1896 1898 self.ui.status(_("adding manifests\n"))
1897 1899 pr.step = _('manifests')
1898 1900 pr.count = 1
1899 1901 pr.total = changesets # manifests <= changesets
1900 1902 # no need to check for empty manifest group here:
1901 1903 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1902 1904 # no new manifest will be created and the manifest group will
1903 1905 # be empty during the pull
1904 1906 source.manifestheader()
1905 1907 self.manifest.addgroup(source, revmap, trp)
1906 1908 self.ui.progress(_('manifests'), None)
1907 1909
1908 1910 needfiles = {}
1909 1911 if self.ui.configbool('server', 'validate', default=False):
1910 1912 # validate incoming csets have their manifests
1911 1913 for cset in xrange(clstart, clend):
1912 1914 mfest = self.changelog.read(self.changelog.node(cset))[0]
1913 1915 mfest = self.manifest.readdelta(mfest)
1914 1916 # store file nodes we must see
1915 1917 for f, n in mfest.iteritems():
1916 1918 needfiles.setdefault(f, set()).add(n)
1917 1919
1918 1920 # process the files
1919 1921 self.ui.status(_("adding file changes\n"))
1920 1922 pr.step = _('files')
1921 1923 pr.count = 1
1922 1924 pr.total = efiles
1923 1925 source.callback = None
1924 1926
1925 1927 while True:
1926 1928 chunkdata = source.filelogheader()
1927 1929 if not chunkdata:
1928 1930 break
1929 1931 f = chunkdata["filename"]
1930 1932 self.ui.debug("adding %s revisions\n" % f)
1931 1933 pr()
1932 1934 fl = self.file(f)
1933 1935 o = len(fl)
1934 1936 if fl.addgroup(source, revmap, trp) is None:
1935 1937 raise util.Abort(_("received file revlog group is empty"))
1936 1938 revisions += len(fl) - o
1937 1939 files += 1
1938 1940 if f in needfiles:
1939 1941 needs = needfiles[f]
1940 1942 for new in xrange(o, len(fl)):
1941 1943 n = fl.node(new)
1942 1944 if n in needs:
1943 1945 needs.remove(n)
1944 1946 if not needs:
1945 1947 del needfiles[f]
1946 1948 self.ui.progress(_('files'), None)
1947 1949
1948 1950 for f, needs in needfiles.iteritems():
1949 1951 fl = self.file(f)
1950 1952 for n in needs:
1951 1953 try:
1952 1954 fl.rev(n)
1953 1955 except error.LookupError:
1954 1956 raise util.Abort(
1955 1957 _('missing file data for %s:%s - run hg verify') %
1956 1958 (f, hex(n)))
1957 1959
1958 1960 dh = 0
1959 1961 if oldheads:
1960 1962 heads = cl.heads()
1961 1963 dh = len(heads) - len(oldheads)
1962 1964 for h in heads:
1963 1965 if h not in oldheads and 'close' in self[h].extra():
1964 1966 dh -= 1
1965 1967 htext = ""
1966 1968 if dh:
1967 1969 htext = _(" (%+d heads)") % dh
1968 1970
1969 1971 self.ui.status(_("added %d changesets"
1970 1972 " with %d changes to %d files%s\n")
1971 1973 % (changesets, revisions, files, htext))
1972 1974
1973 1975 if changesets > 0:
1974 1976 p = lambda: cl.writepending() and self.root or ""
1975 1977 self.hook('pretxnchangegroup', throw=True,
1976 1978 node=hex(cl.node(clstart)), source=srctype,
1977 1979 url=url, pending=p)
1978 1980
1979 1981 # make changelog see real files again
1980 1982 cl.finalize(trp)
1981 1983
1982 1984 tr.close()
1983 1985 finally:
1984 1986 tr.release()
1985 1987 if lock:
1986 1988 lock.release()
1987 1989
1988 1990 if changesets > 0:
1989 1991 # forcefully update the on-disk branch cache
1990 1992 self.ui.debug("updating the branch cache\n")
1991 1993 self.updatebranchcache()
1992 1994 self.hook("changegroup", node=hex(cl.node(clstart)),
1993 1995 source=srctype, url=url)
1994 1996
1995 1997 for i in xrange(clstart, clend):
1996 1998 self.hook("incoming", node=hex(cl.node(i)),
1997 1999 source=srctype, url=url)
1998 2000
1999 2001 # never return 0 here:
2000 2002 if dh < 0:
2001 2003 return dh - 1
2002 2004 else:
2003 2005 return dh + 1
2004 2006
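Because 0 is reserved for "nothing changed", the head-count delta dh is shifted one step away from zero on the way out. A hypothetical decoder for callers that want the raw delta back:

    # Hypothetical inverse of the 'never return 0' encoding above.
    def headdelta(ret):
        assert ret != 0
        if ret > 0:
            return ret - 1   # 1 -> unchanged, 2..n -> heads added
        return ret + 1       # -2..-n -> heads removed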
2005 2007 def stream_in(self, remote, requirements):
2006 2008 lock = self.lock()
2007 2009 try:
2008 2010 fp = remote.stream_out()
2009 2011 l = fp.readline()
2010 2012 try:
2011 2013 resp = int(l)
2012 2014 except ValueError:
2013 2015 raise error.ResponseError(
2014 2016 _('Unexpected response from remote server:'), l)
2015 2017 if resp == 1:
2016 2018 raise util.Abort(_('operation forbidden by server'))
2017 2019 elif resp == 2:
2018 2020 raise util.Abort(_('locking the remote repository failed'))
2019 2021 elif resp != 0:
2020 2022 raise util.Abort(_('the server sent an unknown error code'))
2021 2023 self.ui.status(_('streaming all changes\n'))
2022 2024 l = fp.readline()
2023 2025 try:
2024 2026 total_files, total_bytes = map(int, l.split(' ', 1))
2025 2027 except (ValueError, TypeError):
2026 2028 raise error.ResponseError(
2027 2029 _('Unexpected response from remote server:'), l)
2028 2030 self.ui.status(_('%d files to transfer, %s of data\n') %
2029 2031 (total_files, util.bytecount(total_bytes)))
2030 2032 start = time.time()
2031 2033 for i in xrange(total_files):
2032 2034 # XXX doesn't support '\n' or '\r' in filenames
2033 2035 l = fp.readline()
2034 2036 try:
2035 2037 name, size = l.split('\0', 1)
2036 2038 size = int(size)
2037 2039 except (ValueError, TypeError):
2038 2040 raise error.ResponseError(
2039 2041 _('Unexpected response from remote server:'), l)
2040 2042 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2041 2043 # for backwards compat, name was partially encoded
2042 2044 ofp = self.sopener(store.decodedir(name), 'w')
2043 2045 for chunk in util.filechunkiter(fp, limit=size):
2044 2046 ofp.write(chunk)
2045 2047 ofp.close()
2046 2048 elapsed = time.time() - start
2047 2049 if elapsed <= 0:
2048 2050 elapsed = 0.001
2049 2051 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2050 2052 (util.bytecount(total_bytes), elapsed,
2051 2053 util.bytecount(total_bytes / elapsed)))
2052 2054
2053 2055 # new requirements = old non-format requirements + new format-related
2054 2056 # requirements from the streamed-in repository
2055 2057 requirements.update(set(self.requirements) - self.supportedformats)
2056 2058 self._applyrequirements(requirements)
2057 2059 self._writerequirements()
2058 2060
2059 2061 self.invalidate()
2060 2062 return len(self.heads()) + 1
2061 2063 finally:
2062 2064 lock.release()
2063 2065
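The stream_out payload consumed above is line-framed: a status line, a totals line, then one name/size record per file followed by that many raw bytes. An illustrative transcript (file names and sizes invented):

    0                      status: 0 ok, 1 forbidden, 2 remote lock failed
    2 1917                 total_files total_bytes
    data/foo.i\01234       per-file header: name, NUL, size; then 1234 raw bytes
    00changelog.i\0683     ...one such record per file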
2064 2066 def clone(self, remote, heads=[], stream=False):
2065 2067 '''clone remote repository.
2066 2068
2067 2069 keyword arguments:
2068 2070 heads: list of revs to clone (forces use of pull)
2069 2071 stream: use streaming clone if possible'''
2070 2072
2071 2073 # now, all clients that can request uncompressed clones can
2072 2074 # read repo formats supported by all servers that can serve
2073 2075 # them.
2074 2076
2075 2077 # if revlog format changes, client will have to check version
2076 2078 # and format flags on "stream" capability, and use
2077 2079 # uncompressed only if compatible.
2078 2080
2079 2081 if stream and not heads:
2080 2082 # 'stream' means remote revlog format is revlogv1 only
2081 2083 if remote.capable('stream'):
2082 2084 return self.stream_in(remote, set(('revlogv1',)))
2083 2085 # otherwise, 'streamreqs' contains the remote revlog format
2084 2086 streamreqs = remote.capable('streamreqs')
2085 2087 if streamreqs:
2086 2088 streamreqs = set(streamreqs.split(','))
2087 2089 # if we support it, stream in and adjust our requirements
2088 2090 if not streamreqs - self.supportedformats:
2089 2091 return self.stream_in(remote, streamreqs)
2090 2092 return self.pull(remote, heads)
2091 2093
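The streaming decision above boils down to: legacy servers advertise 'stream' (implying plain revlogv1), newer ones advertise 'streamreqs=<fmt,...>', and the client streams only when it supports every listed format, falling back to pull otherwise. A condensed sketch with a hypothetical maystream helper:

    # Hypothetical condensation of the capability check in clone().
    def maystream(remote, supportedformats):
        if remote.capable('stream'):        # legacy: revlogv1 only
            return set(('revlogv1',))
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            if not streamreqs - supportedformats:
                return streamreqs           # every listed format is known
        return None                         # caller falls back to pull()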
2092 2094 def pushkey(self, namespace, key, old, new):
2093 2095 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2094 2096 old=old, new=new)
2095 2097 ret = pushkey.push(self, namespace, key, old, new)
2096 2098 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2097 2099 ret=ret)
2098 2100 return ret
2099 2101
2100 2102 def listkeys(self, namespace):
2101 2103 self.hook('prelistkeys', throw=True, namespace=namespace)
2102 2104 values = pushkey.list(self, namespace)
2103 2105 self.hook('listkeys', namespace=namespace, values=values)
2104 2106 return values
2105 2107
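pushkey/listkeys form the generic key-value channel used for the bookmark exchange earlier in this file, and phase information (the subject of this changeset) travels over the same channel. A hedged usage sketch; 'stable' is a made-up bookmark name:

    # Hypothetical caller of the plumbing above; bookmark values travel
    # as hex node strings.
    from mercurial.node import hex
    marks = repo.listkeys('bookmarks')       # {name: hex node}
    old = marks.get('stable', '')            # '' when not yet present
    ok = repo.pushkey('bookmarks', 'stable', old, hex(repo['tip'].node()))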
2106 2108 def debugwireargs(self, one, two, three=None, four=None, five=None):
2107 2109 '''used to test argument passing over the wire'''
2108 2110 return "%s %s %s %s %s" % (one, two, three, four, five)
2109 2111
2110 2112 def savecommitmessage(self, text):
2111 2113 fp = self.opener('last-message.txt', 'wb')
2112 2114 try:
2113 2115 fp.write(text)
2114 2116 finally:
2115 2117 fp.close()
2116 2118 return self.pathto(fp.name[len(self.root)+1:])
2117 2119
2118 2120 # used to avoid circular references so destructors work
2119 2121 def aftertrans(files):
2120 2122 renamefiles = [tuple(t) for t in files]
2121 2123 def a():
2122 2124 for src, dest in renamefiles:
2123 2125 util.rename(src, dest)
2124 2126 return a
2125 2127
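aftertrans exists so a transaction can hold rename callbacks without keeping the repository alive: the closure captures only plain (src, dest) tuples. A hypothetical use mirroring the journal-to-undo renames visible in the tests below:

    # Hypothetical usage: queue journal -> undo renames for when the
    # transaction finishes, without creating a reference cycle.
    cb = aftertrans([('journal', 'undo'),
                     ('journal.phaseroots', 'undo.phaseroots')])
    cb()   # performs util.rename(src, dest) for each captured pair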
2126 2128 def undoname(fn):
2127 2129 base, name = os.path.split(fn)
2128 2130 assert name.startswith('journal')
2129 2131 return os.path.join(base, name.replace('journal', 'undo', 1))
2130 2132
2131 2133 def instance(ui, path, create):
2132 2134 return localrepository(ui, util.urllocalpath(path), create)
2133 2135
2134 2136 def islocal(path):
2135 2137 return True
@@ -1,113 +1,115 b''
1 1 Init repo1:
2 2
3 3 $ hg init repo1
4 4 $ cd repo1
5 5 $ echo "some text" > a
6 6 $ hg add
7 7 adding a
8 8 $ hg ci -m first
9 9 $ cat .hg/store/fncache | sort
10 10 data/a.i
11 11
12 12 Testing a.i/b:
13 13
14 14 $ mkdir a.i
15 15 $ echo "some other text" > a.i/b
16 16 $ hg add
17 17 adding a.i/b (glob)
18 18 $ hg ci -m second
19 19 $ cat .hg/store/fncache | sort
20 20 data/a.i
21 21 data/a.i.hg/b.i
22 22
23 23 Testing a.i.hg/c:
24 24
25 25 $ mkdir a.i.hg
26 26 $ echo "yet another text" > a.i.hg/c
27 27 $ hg add
28 28 adding a.i.hg/c (glob)
29 29 $ hg ci -m third
30 30 $ cat .hg/store/fncache | sort
31 31 data/a.i
32 32 data/a.i.hg.hg/c.i
33 33 data/a.i.hg/b.i
34 34
35 35 Testing verify:
36 36
37 37 $ hg verify
38 38 checking changesets
39 39 checking manifests
40 40 crosschecking files in changesets and manifests
41 41 checking files
42 42 3 files, 3 changesets, 3 total revisions
43 43
44 44 $ rm .hg/store/fncache
45 45
46 46 $ hg verify
47 47 checking changesets
48 48 checking manifests
49 49 crosschecking files in changesets and manifests
50 50 checking files
51 51 data/a.i@0: missing revlog!
52 52 data/a.i.hg/c.i@2: missing revlog!
53 53 data/a.i/b.i@1: missing revlog!
54 54 3 files, 3 changesets, 3 total revisions
55 55 3 integrity errors encountered!
56 56 (first damaged changeset appears to be 0)
57 57 [1]
58 58 $ cd ..
59 59
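The fncache entries above illustrate the store's directory encoding: a working-copy directory whose name ends in .i, .d or .hg would collide with revlog file names, so the store appends .hg to such directory components. Read against this test:

    working copy path    store path
    a                    data/a.i
    a.i/b                data/a.i.hg/b.i
    a.i.hg/c             data/a.i.hg.hg/c.i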
60 60 Non store repo:
61 61
62 62 $ hg --config format.usestore=False init foo
63 63 $ cd foo
64 64 $ mkdir tst.d
65 65 $ echo foo > tst.d/foo
66 66 $ hg ci -Amfoo
67 67 adding tst.d/foo
68 68 $ find .hg | sort
69 69 .hg
70 70 .hg/00changelog.i
71 71 .hg/00manifest.i
72 72 .hg/data
73 73 .hg/data/tst.d.hg
74 74 .hg/data/tst.d.hg/foo.i
75 75 .hg/dirstate
76 76 .hg/last-message.txt
77 .hg/phaseroots
77 78 .hg/requires
78 79 .hg/undo
79 80 .hg/undo.bookmarks
80 81 .hg/undo.branch
81 82 .hg/undo.desc
82 83 .hg/undo.dirstate
83 84 .hg/undo.phaseroots
84 85 $ cd ..
85 86
86 87 Non fncache repo:
87 88
88 89 $ hg --config format.usefncache=False init bar
89 90 $ cd bar
90 91 $ mkdir tst.d
91 92 $ echo foo > tst.d/Foo
92 93 $ hg ci -Amfoo
93 94 adding tst.d/Foo
94 95 $ find .hg | sort
95 96 .hg
96 97 .hg/00changelog.i
97 98 .hg/dirstate
98 99 .hg/last-message.txt
99 100 .hg/requires
100 101 .hg/store
101 102 .hg/store/00changelog.i
102 103 .hg/store/00manifest.i
103 104 .hg/store/data
104 105 .hg/store/data/tst.d.hg
105 106 .hg/store/data/tst.d.hg/_foo.i
107 .hg/store/phaseroots
106 108 .hg/store/undo
107 109 .hg/store/undo.phaseroots
108 110 .hg/undo.bookmarks
109 111 .hg/undo.branch
110 112 .hg/undo.desc
111 113 .hg/undo.dirstate
112 114 $ cd ..
113 115
@@ -1,340 +1,345 b''
1 1 $ "$TESTDIR/hghave" no-windows || exit 80
2 2
3 3 $ cat > nlinks.py <<EOF
4 4 > import os, sys
5 5 > for f in sorted(sys.stdin.readlines()):
6 6 > f = f[:-1]
7 7 > print os.lstat(f).st_nlink, f
8 8 > EOF
9 9
10 10 $ nlinksdir()
11 11 > {
12 12 > find $1 -type f | python $TESTTMP/nlinks.py
13 13 > }
14 14
15 15 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
16 16
17 17 $ cat > linkcp.py <<EOF
18 18 > from mercurial import util
19 19 > import sys
20 20 > util.copyfiles(sys.argv[1], sys.argv[2], hardlink=True)
21 21 > EOF
22 22
23 23 $ linkcp()
24 24 > {
25 25 > python $TESTTMP/linkcp.py $1 $2
26 26 > }
27 27
28 28 Prepare repo r1:
29 29
30 30 $ hg init r1
31 31 $ cd r1
32 32
33 33 $ echo c1 > f1
34 34 $ hg add f1
35 35 $ hg ci -m0
36 36
37 37 $ mkdir d1
38 38 $ cd d1
39 39 $ echo c2 > f2
40 40 $ hg add f2
41 41 $ hg ci -m1
42 42 $ cd ../..
43 43
44 44 $ nlinksdir r1/.hg/store
45 45 1 r1/.hg/store/00changelog.i
46 46 1 r1/.hg/store/00manifest.i
47 47 1 r1/.hg/store/data/d1/f2.i
48 48 1 r1/.hg/store/data/f1.i
49 49 1 r1/.hg/store/fncache
50 1 r1/.hg/store/phaseroots
50 51 1 r1/.hg/store/undo
51 52 1 r1/.hg/store/undo.phaseroots
52 53
53 54
54 55 Create hardlinked clone r2:
55 56
56 57 $ hg clone -U --debug r1 r2
57 58 linked 7 files
58 59
59 60 Create non-hardlinked clone r3:
60 61
61 62 $ hg clone --pull r1 r3
62 63 requesting all changes
63 64 adding changesets
64 65 adding manifests
65 66 adding file changes
66 67 added 2 changesets with 2 changes to 2 files
67 68 updating to branch default
68 69 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 70
70 71
71 72 Repos r1 and r2 should now contain hardlinked files:
72 73
73 74 $ nlinksdir r1/.hg/store
74 75 2 r1/.hg/store/00changelog.i
75 76 2 r1/.hg/store/00manifest.i
76 77 2 r1/.hg/store/data/d1/f2.i
77 78 2 r1/.hg/store/data/f1.i
78 79 2 r1/.hg/store/fncache
80 1 r1/.hg/store/phaseroots
79 81 1 r1/.hg/store/undo
80 82 1 r1/.hg/store/undo.phaseroots
81 83
82 84 $ nlinksdir r2/.hg/store
83 85 2 r2/.hg/store/00changelog.i
84 86 2 r2/.hg/store/00manifest.i
85 87 2 r2/.hg/store/data/d1/f2.i
86 88 2 r2/.hg/store/data/f1.i
87 89 2 r2/.hg/store/fncache
88 90
89 91 Repo r3 should not be hardlinked:
90 92
91 93 $ nlinksdir r3/.hg/store
92 94 1 r3/.hg/store/00changelog.i
93 95 1 r3/.hg/store/00manifest.i
94 96 1 r3/.hg/store/data/d1/f2.i
95 97 1 r3/.hg/store/data/f1.i
96 98 1 r3/.hg/store/fncache
97 99 1 r3/.hg/store/undo
98 100 1 r3/.hg/store/undo.phaseroots
99 101
100 102
101 103 Create a non-inlined filelog in r3:
102 104
103 105 $ cd r3/d1
104 106 $ python -c 'for x in range(10000): print x' >> data1
105 107 $ for j in 0 1 2 3 4 5 6 7 8 9; do
106 108 > cat data1 >> f2
107 109 > hg commit -m$j
108 110 > done
109 111 $ cd ../..
110 112
111 113 $ nlinksdir r3/.hg/store
112 114 1 r3/.hg/store/00changelog.i
113 115 1 r3/.hg/store/00manifest.i
114 116 1 r3/.hg/store/data/d1/f2.d
115 117 1 r3/.hg/store/data/d1/f2.i
116 118 1 r3/.hg/store/data/f1.i
117 119 1 r3/.hg/store/fncache
120 1 r3/.hg/store/phaseroots
118 121 1 r3/.hg/store/undo
119 122 1 r3/.hg/store/undo.phaseroots
120 123
121 124 Push to repo r1 should break up most hardlinks in r2:
122 125
123 126 $ hg -R r2 verify
124 127 checking changesets
125 128 checking manifests
126 129 crosschecking files in changesets and manifests
127 130 checking files
128 131 2 files, 2 changesets, 2 total revisions
129 132
130 133 $ cd r3
131 134 $ hg push
132 135 pushing to $TESTTMP/r1
133 136 searching for changes
134 137 adding changesets
135 138 adding manifests
136 139 adding file changes
137 140 added 10 changesets with 10 changes to 1 files
138 141
139 142 $ cd ..
140 143
141 144 $ nlinksdir r2/.hg/store
142 145 1 r2/.hg/store/00changelog.i
143 146 1 r2/.hg/store/00manifest.i
144 147 1 r2/.hg/store/data/d1/f2.i
145 148 2 r2/.hg/store/data/f1.i
146 149 1 r2/.hg/store/fncache
147 150
148 151 $ hg -R r2 verify
149 152 checking changesets
150 153 checking manifests
151 154 crosschecking files in changesets and manifests
152 155 checking files
153 156 2 files, 2 changesets, 2 total revisions
154 157
155 158
156 159 $ cd r1
157 160 $ hg up
158 161 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
159 162
160 163 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
161 164
162 165 $ echo c1c1 >> f1
163 166 $ hg ci -m00
164 167 $ cd ..
165 168
166 169 $ nlinksdir r2/.hg/store
167 170 1 r2/.hg/store/00changelog.i
168 171 1 r2/.hg/store/00manifest.i
169 172 1 r2/.hg/store/data/d1/f2.i
170 173 1 r2/.hg/store/data/f1.i
171 174 1 r2/.hg/store/fncache
172 175
173 176
174 177 $ cd r3
175 178 $ hg tip --template '{rev}:{node|short}\n'
176 179 11:a6451b6bc41f
177 180 $ echo bla > f1
178 181 $ hg ci -m1
179 182 $ cd ..
180 183
181 184 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
182 185
183 186 $ linkcp r3 r4
184 187
185 188 r4 has hardlinks in the working dir (not just inside .hg):
186 189
187 190 $ nlinksdir r4
188 191 2 r4/.hg/00changelog.i
189 192 2 r4/.hg/branch
190 193 2 r4/.hg/cache/branchheads
191 194 2 r4/.hg/cache/tags
192 195 2 r4/.hg/dirstate
193 196 2 r4/.hg/hgrc
194 197 2 r4/.hg/last-message.txt
195 198 2 r4/.hg/requires
196 199 2 r4/.hg/store/00changelog.i
197 200 2 r4/.hg/store/00manifest.i
198 201 2 r4/.hg/store/data/d1/f2.d
199 202 2 r4/.hg/store/data/d1/f2.i
200 203 2 r4/.hg/store/data/f1.i
201 204 2 r4/.hg/store/fncache
205 2 r4/.hg/store/phaseroots
202 206 2 r4/.hg/store/undo
203 207 2 r4/.hg/store/undo.phaseroots
204 208 2 r4/.hg/undo.bookmarks
205 209 2 r4/.hg/undo.branch
206 210 2 r4/.hg/undo.desc
207 211 2 r4/.hg/undo.dirstate
208 212 2 r4/d1/data1
209 213 2 r4/d1/f2
210 214 2 r4/f1
211 215
212 216 Update back to revision 11 in r4 should break hardlink of file f1:
213 217
214 218 $ hg -R r4 up 11
215 219 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
216 220
217 221 $ nlinksdir r4
218 222 2 r4/.hg/00changelog.i
219 223 1 r4/.hg/branch
220 224 2 r4/.hg/cache/branchheads
221 225 2 r4/.hg/cache/tags
222 226 1 r4/.hg/dirstate
223 227 2 r4/.hg/hgrc
224 228 2 r4/.hg/last-message.txt
225 229 2 r4/.hg/requires
226 230 2 r4/.hg/store/00changelog.i
227 231 2 r4/.hg/store/00manifest.i
228 232 2 r4/.hg/store/data/d1/f2.d
229 233 2 r4/.hg/store/data/d1/f2.i
230 234 2 r4/.hg/store/data/f1.i
231 235 2 r4/.hg/store/fncache
236 2 r4/.hg/store/phaseroots
232 237 2 r4/.hg/store/undo
233 238 2 r4/.hg/store/undo.phaseroots
234 239 2 r4/.hg/undo.bookmarks
235 240 2 r4/.hg/undo.branch
236 241 2 r4/.hg/undo.desc
237 242 2 r4/.hg/undo.dirstate
238 243 2 r4/d1/data1
239 244 2 r4/d1/f2
240 245 1 r4/f1
241 246
242 247
243 248 Test hardlinking outside hg:
244 249
245 250 $ mkdir x
246 251 $ echo foo > x/a
247 252
248 253 $ linkcp x y
249 254 $ echo bar >> y/a
250 255
251 256 No diff if hardlink:
252 257
253 258 $ diff x/a y/a
254 259
255 260 Test mq hardlinking:
256 261
257 262 $ echo "[extensions]" >> $HGRCPATH
258 263 $ echo "mq=" >> $HGRCPATH
259 264
260 265 $ hg init a
261 266 $ cd a
262 267
263 268 $ hg qimport -n foo - << EOF
264 269 > # HG changeset patch
265 270 > # Date 1 0
266 271 > diff -r 2588a8b53d66 a
267 272 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
268 273 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
269 274 > @@ -0,0 +1,1 @@
270 275 > +a
271 276 > EOF
272 277 adding foo to series file
273 278
274 279 $ hg qpush
275 280 applying foo
276 281 now at: foo
277 282
278 283 $ cd ..
279 284 $ linkcp a b
280 285 $ cd b
281 286
282 287 $ hg qimport -n bar - << EOF
283 288 > # HG changeset patch
284 289 > # Date 2 0
285 290 > diff -r 2588a8b53d66 a
286 291 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
287 292 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
288 293 > @@ -0,0 +1,1 @@
289 294 > +b
290 295 > EOF
291 296 adding bar to series file
292 297
293 298 $ hg qpush
294 299 applying bar
295 300 now at: bar
296 301
297 302 $ cat .hg/patches/status
298 303 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
299 304 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
300 305
301 306 $ cat .hg/patches/series
302 307 foo
303 308 bar
304 309
305 310 $ cat ../a/.hg/patches/status
306 311 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
307 312
308 313 $ cat ../a/.hg/patches/series
309 314 foo
310 315
311 316 Test tags hardlinking:
312 317
313 318 $ hg qdel -r qbase:qtip
314 319 patch foo finalized without changeset message
315 320 patch bar finalized without changeset message
316 321
317 322 $ hg tag -l lfoo
318 323 $ hg tag foo
319 324
320 325 $ cd ..
321 326 $ linkcp b c
322 327 $ cd c
323 328
324 329 $ hg tag -l -r 0 lbar
325 330 $ hg tag -r 0 bar
326 331
327 332 $ cat .hgtags
328 333 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
329 334 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
330 335
331 336 $ cat .hg/localtags
332 337 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
333 338 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
334 339
335 340 $ cat ../b/.hgtags
336 341 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
337 342
338 343 $ cat ../b/.hg/localtags
339 344 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
340 345
@@ -1,145 +1,146 b''
1 1 test that new files created in .hg inherit the permissions from .hg/store
2 2
3 3
4 4 $ "$TESTDIR/hghave" unix-permissions || exit 80
5 5
6 6 $ mkdir dir
7 7
8 8 just in case somebody has a strange $TMPDIR
9 9
10 10 $ chmod g-s dir
11 11 $ cd dir
12 12
13 13 $ cat >printmodes.py <<EOF
14 14 > import os, sys
15 15 >
16 16 > allnames = []
17 17 > isdir = {}
18 18 > for root, dirs, files in os.walk(sys.argv[1]):
19 19 > for d in dirs:
20 20 > name = os.path.join(root, d)
21 21 > isdir[name] = 1
22 22 > allnames.append(name)
23 23 > for f in files:
24 24 > name = os.path.join(root, f)
25 25 > allnames.append(name)
26 26 > allnames.sort()
27 27 > for name in allnames:
28 28 > suffix = name in isdir and '/' or ''
29 29 > print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
30 30 > EOF
31 31
32 32 $ cat >mode.py <<EOF
33 33 > import sys
34 34 > import os
35 35 > print '%05o' % os.lstat(sys.argv[1]).st_mode
36 36 > EOF
37 37
38 38 $ umask 077
39 39
40 40 $ hg init repo
41 41 $ cd repo
42 42
43 43 $ chmod 0770 .hg/store
44 44
45 45 before commit
46 46 store can be written by the group, other files cannot
47 47 store is setgid
48 48
49 49 $ python ../printmodes.py .
50 50 00700 ./.hg/
51 51 00600 ./.hg/00changelog.i
52 52 00600 ./.hg/requires
53 53 00770 ./.hg/store/
54 54
55 55 $ mkdir dir
56 56 $ touch foo dir/bar
57 57 $ hg ci -qAm 'add files'
58 58
59 59 after commit
60 60 working dir files can only be written by the owner
61 61 files created in .hg can be written by the group
62 62 (in particular, store/**, dirstate, branch cache file, undo files)
63 63 new directories are setgid
64 64
65 65 $ python ../printmodes.py .
66 66 00700 ./.hg/
67 67 00600 ./.hg/00changelog.i
68 68 00660 ./.hg/dirstate
69 69 00660 ./.hg/last-message.txt
70 70 00600 ./.hg/requires
71 71 00770 ./.hg/store/
72 72 00660 ./.hg/store/00changelog.i
73 73 00660 ./.hg/store/00manifest.i
74 74 00770 ./.hg/store/data/
75 75 00770 ./.hg/store/data/dir/
76 76 00660 ./.hg/store/data/dir/bar.i
77 77 00660 ./.hg/store/data/foo.i
78 78 00660 ./.hg/store/fncache
79 00660 ./.hg/store/phaseroots
79 80 00660 ./.hg/store/undo
80 81 00660 ./.hg/store/undo.phaseroots
81 82 00660 ./.hg/undo.bookmarks
82 83 00660 ./.hg/undo.branch
83 84 00660 ./.hg/undo.desc
84 85 00660 ./.hg/undo.dirstate
85 86 00700 ./dir/
86 87 00600 ./dir/bar
87 88 00600 ./foo
88 89
89 90 $ umask 007
90 91 $ hg init ../push
91 92
92 93 before push
93 94 group can write everything
94 95
95 96 $ python ../printmodes.py ../push
96 97 00770 ../push/.hg/
97 98 00660 ../push/.hg/00changelog.i
98 99 00660 ../push/.hg/requires
99 100 00770 ../push/.hg/store/
100 101
101 102 $ umask 077
102 103 $ hg -q push ../push
103 104
104 105 after push
105 106 group can still write everything
106 107
107 108 $ python ../printmodes.py ../push
108 109 00770 ../push/.hg/
109 110 00660 ../push/.hg/00changelog.i
110 111 00770 ../push/.hg/cache/
111 112 00660 ../push/.hg/cache/branchheads
112 113 00660 ../push/.hg/requires
113 114 00770 ../push/.hg/store/
114 115 00660 ../push/.hg/store/00changelog.i
115 116 00660 ../push/.hg/store/00manifest.i
116 117 00770 ../push/.hg/store/data/
117 118 00770 ../push/.hg/store/data/dir/
118 119 00660 ../push/.hg/store/data/dir/bar.i
119 120 00660 ../push/.hg/store/data/foo.i
120 121 00660 ../push/.hg/store/fncache
121 122 00660 ../push/.hg/store/undo
122 123 00660 ../push/.hg/store/undo.phaseroots
123 124 00660 ../push/.hg/undo.bookmarks
124 125 00660 ../push/.hg/undo.branch
125 126 00660 ../push/.hg/undo.desc
126 127 00660 ../push/.hg/undo.dirstate
127 128
128 129
129 130 Test that we don't lose the setgid bit when we call chmod.
130 131 Not all systems support setgid directories (e.g. HFS+), so
131 132 just check that directories have the same mode.
132 133
133 134 $ cd ..
134 135 $ hg init setgid
135 136 $ cd setgid
136 137 $ chmod g+rwx .hg/store
137 138 $ chmod g+s .hg/store 2> /dev/null
138 139 $ mkdir dir
139 140 $ touch dir/file
140 141 $ hg ci -qAm 'add dir/file'
141 142 $ storemode=`python ../mode.py .hg/store`
142 143 $ dirmode=`python ../mode.py .hg/store/data/dir`
143 144 $ if [ "$storemode" != "$dirmode" ]; then
144 145 > echo "$storemode != $dirmode"
145 146 > fi
@@ -1,10 +1,10 b''
1 1 $ alias hglog='hg log --template "{rev} {phase} {desc}\n"'
2 2
3 3 $ hg init initialrepo
4 4 $ cd initialrepo
5 5 $ touch sam
6 6 $ hg add sam
7 7 $ hg ci -m 'first'
8 8
9 9 $ hglog
10 0 0 first
10 0 1 first
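This final hunk is the user-visible effect of the change: with {phase} in the log template, a freshly created commit now reports phase 1 instead of 0. As a quick key to the numbers (labels per the phase scheme this series introduces):

    phase 0    public: exchanged, treated as immutable
    phase 1    draft: new local commits, still safe to rewrite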