##// END OF EJS Templates
rollback: use a hint for force
Matt Mackall -
r15187:0292f88d default
parent child Browse files
Show More
@@ -1,2081 +1,2081 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at path; with create=True, initialize a
        new repository there instead (raising if one already exists).

        baseui is copied so per-repository config can be layered on top
        without mutating the caller's ui.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # per-repository config may enable additional extensions
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: treat as an empty requirement set
                requirements = set()

        self.sharedpath = self.path
        try:
            # follow the share-extension indirection when present
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
112 112
113 113 def _applyrequirements(self, requirements):
114 114 self.requirements = requirements
115 115 openerreqs = set(('revlogv1', 'generaldelta'))
116 116 self.sopener.options = dict((r, 1) for r in requirements
117 117 if r in openerreqs)
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
124 124
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        # path relative to the repository root
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # an exact subrepo path is always legal
                    return True
                else:
                    # delegate to the subrepo for deeper nesting
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter ancestor prefix
                parts.pop()
        return False
161 161
    @filecache('bookmarks')
    def _bookmarks(self):
        # mapping of bookmark name -> node, reloaded when .hg/bookmarks changes
        return bookmarks.read(self)
165 165
    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark (or None), tracked via bookmarks.current
        return bookmarks.readcurrent(self)
169 169
    @filecache('00changelog.i', True)
    def changelog(self):
        # the changelog lives in the store, hence sopener
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a transaction is in progress (e.g. hooks run during commit);
            # expose its uncommitted revisions from 00changelog.i.a, but
            # only if the pending transaction belongs to this repository
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
178 178
    @filecache('00manifest.i', True)
    def manifest(self):
        # manifest revlog, cached and reloaded when 00manifest.i changes
        return manifest.manifest(self.sopener)
182 182
    @filecache('dirstate')
    def dirstate(self):
        # validate() guards against a dirstate whose recorded working-copy
        # parent is missing from the changelog (e.g. after a strip)
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    # warn only once per dirstate instance
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                # substitute the null revision for the unknown parent
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198 198
    def __getitem__(self, changeid):
        # None selects the working directory context
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)
203 203
204 204 def __contains__(self, changeid):
205 205 try:
206 206 return bool(self.lookup(changeid))
207 207 except error.RepoLookupError:
208 208 return False
209 209
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True
212 212
    def __len__(self):
        # number of revisions in the repository
        return len(self.changelog)
215 215
216 216 def __iter__(self):
217 217 for i in xrange(len(self)):
218 218 yield i
219 219
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''

        # safely interpolate args into the revset expression
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        for r in m(self, range(len(self))):
            yield self[r]
230 230
    def url(self):
        # local repositories are addressed with the file: scheme
        return 'file:' + self.root
233 233
    def hook(self, name, throw=False, **args):
        '''run the named hook with args; throw=True aborts on hook failure'''
        return hook.hook(self.ui, self, name, throw, **args)
236 236
237 237 tag_disallowed = ':\r\n'
238 238
    def _tag(self, names, node, message, local, user, date, extra={}):
        '''internal tag worker: validate names, run hooks, write either
        .hg/localtags (local) or .hgtags plus a commit (global).

        Returns the node of the tag commit for global tags, None for
        local ones.  NOTE(review): extra uses a mutable default; it is
        only passed through to commit() here, never mutated.'''
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            # pretag hooks may veto the operation (throw=True)
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines at EOF, ensuring the file ends with a newline
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    # record the old node first so history shows the move
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
308 308
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to auto-commit on top of a locally modified .hgtags;
            # status()[:5] covers the first five status lists — presumably
            # modified/added/removed/deleted/unknown (TODO confirm)
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
338 338
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
360 360
    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags
364 364
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is an implicit tag on every repository
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
402 402
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
413 413
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            # sort by revision, then drop the revision component
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
424 424
425 425 def nodetags(self, node):
426 426 '''return the tags associated with a node'''
427 427 if not self._tagscache.nodetagscache:
428 428 nodetagscache = {}
429 429 for t, n in self.tags().iteritems():
430 430 nodetagscache.setdefault(n, []).append(t)
431 431 for tags in nodetagscache.itervalues():
432 432 tags.sort()
433 433 self._tagscache.nodetagscache = nodetagscache
434 434 return self._tagscache.nodetagscache.get(node, [])
435 435
436 436 def nodebookmarks(self, node):
437 437 marks = []
438 438 for bookmark, n in self._bookmarks.iteritems():
439 439 if n == node:
440 440 marks.append(bookmark)
441 441 return sorted(marks)
442 442
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # extend the partial branch cache with revisions lrev+1..tip,
        # then persist the refreshed cache to disk
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
452 452
    def updatebranchcache(self):
        '''bring the in-memory branch head cache up to date with tip'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already current
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable previous state (first call, or old tip was
            # stripped): start from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
469 469
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache
474 474
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            # scan from the tipmost head backwards for the first open one;
            # field [5] of a changelog entry holds the extras dict, where
            # a 'close' key marks a closed branch head
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
487 487
    def _readbranchcache(self):
        '''read .hg/cache/branchheads and return (branchmap, tipnode, tiprev).

        File format: first line is "<tip-hex> <tip-rev>", each following
        line is "<node-hex> <branch name>".  Any problem falls back to an
        empty cache (full rebuild).'''
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # corrupt cache is non-fatal: report in debug mode and rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
516 516
    def _writebranchcache(self, branches, tip, tiprev):
        '''best-effort write of the branch head cache; failures are ignored
        since the cache is purely advisory'''
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
527 527
    def _updatebranchcache(self, partial, ctxgen):
        '''fold the changesets from ctxgen into the partial branch head
        map in place'''
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # drop heads that latest can reach: they are ancestors
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
553 553
    def lookup(self, key):
        '''resolve key to a binary changelog node.

        key may be a revision number, '.', 'null', 'tip', a full or
        partial node, a bookmark, a tag, or a branch name.  Raises
        RepoLookupError when nothing matches.'''
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        # namespaces are tried in precedence order:
        # bookmarks, then tags, then branches, then node prefixes
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                # binary node: report it in readable hex form
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
586 586
587 587 def lookupbranch(self, key, remote=None):
588 588 repo = remote or self
589 589 if key in repo.branchmap():
590 590 return key
591 591
592 592 repo = (remote and remote.local()) and remote or self
593 593 return repo[key].branch()
594 594
595 595 def known(self, nodes):
596 596 nm = self.changelog.nodemap
597 597 return [(n in nm) for n in nodes]
598 598
    def local(self):
        # returning self signals this repository is local (truthy)
        return self
601 601
    def join(self, f):
        '''return the path of f inside the .hg directory'''
        return os.path.join(self.path, f)
604 604
    def wjoin(self, f):
        '''return the path of f inside the working directory'''
        return os.path.join(self.root, f)
607 607
608 608 def file(self, f):
609 609 if f[0] == '/':
610 610 f = f[1:]
611 611 return filelog.filelog(self.sopener, f)
612 612
    def changectx(self, changeid):
        # convenience alias for repo[changeid]
        return self[changeid]
615 615
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
619 619
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
624 624
    def getcwd(self):
        '''return the current directory as tracked by the dirstate'''
        return self.dirstate.getcwd()
627 627
    def pathto(self, f, cwd=None):
        '''return f rendered relative to cwd (delegates to dirstate)'''
        return self.dirstate.pathto(f, cwd)
630 630
    def wfile(self, f, mode='r'):
        '''open file f from the working directory'''
        return self.wopener(f, mode)
633 633
    def _link(self, f):
        # True if f is a symlink in the working directory
        return os.path.islink(self.wjoin(f))
636 636
    def _loadfilter(self, filter):
        '''load and cache the filter patterns for a config section
        ('encode' or 'decode'); returns a list of (match, fn, params)'''
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # in-process filter registered via adddatafilter()
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as an external command filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
660 660
    def _filter(self, filterpats, filename, data):
        '''run data through the first filter whose pattern matches filename'''
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                # only the first matching filter applies
                break

        return data
669 669
    @propertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')
673 673
    @propertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory
        return self._loadfilter('decode')
677 677
    def adddatafilter(self, name, filter):
        '''register an in-process data filter under name (see _loadfilter)'''
        self._datafilters[name] = filter
680 680
681 681 def wread(self, filename):
682 682 if self._link(filename):
683 683 data = os.readlink(self.wjoin(filename))
684 684 else:
685 685 data = self.wopener.read(filename)
686 686 return self._filter(self._encodefilterpats, filename, data)
687 687
    def wwrite(self, filename, data, flags):
        '''write data to filename in the working directory, applying the
        decode filters and honouring the 'l' (symlink) and 'x'
        (executable) flags'''
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # symlink: data is the link target
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)
696 696
    def wwritedata(self, filename, data):
        '''return data as it would be written to the working directory
        (decode filters applied, nothing written)'''
        return self._filter(self._decodefilterpats, filename, data)
699 699
    def transaction(self, desc):
        '''open a new transaction (or nest within the running one) and
        return it; desc is recorded in the journal for rollback messages'''
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # nest inside the already-running transaction
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        # on close, journal.* files become undo.* files for rollback
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref so an unreferenced transaction can be collected
        self._transref = weakref.ref(tr)
        return tr
719 719
    def _writejournal(self, desc):
        '''snapshot dirstate, branch, desc and bookmarks into journal.*
        files; return the tuple of journal paths for later renaming'''
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            # record that there were no bookmarks at journal time
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))
741 741
    def recover(self):
        '''roll back an interrupted transaction if a journal exists;
        return True if recovery happened, False otherwise'''
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
756 756
    def rollback(self, dryrun=False, force=False):
        '''undo the last transaction if undo information exists; takes
        both wlock and lock since the dirstate may be rewritten'''
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
769 769
770 770 def _rollback(self, dryrun, force):
771 771 ui = self.ui
772 772 try:
773 773 args = self.opener.read('undo.desc').splitlines()
774 774 (oldlen, desc, detail) = (int(args[0]), args[1], None)
775 775 if len(args) >= 3:
776 776 detail = args[2]
777 777 oldtip = oldlen - 1
778 778
779 779 if detail and ui.verbose:
780 780 msg = (_('repository tip rolled back to revision %s'
781 781 ' (undo %s: %s)\n')
782 782 % (oldtip, desc, detail))
783 783 else:
784 784 msg = (_('repository tip rolled back to revision %s'
785 785 ' (undo %s)\n')
786 786 % (oldtip, desc))
787 787 except IOError:
788 788 msg = _('rolling back unknown transaction\n')
789 789 desc = None
790 790
791 791 if not force and self['.'] != self['tip'] and desc == 'commit':
792 792 raise util.Abort(
793 793 _('rollback of last commit while not checked out '
794 'may lose data (use -f to force)'))
794 'may lose data'), hint=_('use -f to force'))
795 795
796 796 ui.status(msg)
797 797 if dryrun:
798 798 return 0
799 799
800 800 parents = self.dirstate.parents()
801 801 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
802 802 if os.path.exists(self.join('undo.bookmarks')):
803 803 util.rename(self.join('undo.bookmarks'),
804 804 self.join('bookmarks'))
805 805 self.invalidate()
806 806
807 807 parentgone = (parents[0] not in self.changelog.nodemap or
808 808 parents[1] not in self.changelog.nodemap)
809 809 if parentgone:
810 810 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
811 811 try:
812 812 branch = self.opener.read('undo.branch')
813 813 self.dirstate.setbranch(branch)
814 814 except IOError:
815 815 ui.warn(_('named branch could not be reset: '
816 816 'current branch is still \'%s\'\n')
817 817 % self.dirstate.branch())
818 818
819 819 self.dirstate.invalidate()
820 820 self.destroyed()
821 821 parents = tuple([p.rev() for p in self.parents()])
822 822 if len(parents) > 1:
823 823 ui.status(_('working directory now based on '
824 824 'revisions %d and %d\n') % parents)
825 825 else:
826 826 ui.status(_('working directory now based on '
827 827 'revision %d\n') % parents)
828 828 return 0
829 829
830 830 def invalidatecaches(self):
831 831 try:
832 832 delattr(self, '_tagscache')
833 833 except AttributeError:
834 834 pass
835 835
836 836 self._branchcache = None # in UTF-8
837 837 self._branchcachetip = None
838 838
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        try:
            # remove the filecache-managed property from the instance
            delattr(self, 'dirstate')
        except AttributeError:
            # dirstate was never loaded; nothing to drop
            pass
852 852
853 853 def invalidate(self):
854 854 for k in self._filecache:
855 855 # dirstate is invalidated separately in invalidatedirstate()
856 856 if k == 'dirstate':
857 857 continue
858 858
859 859 try:
860 860 delattr(self, k)
861 861 except AttributeError:
862 862 pass
863 863 self.invalidatecaches()
864 864
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        '''acquire the named lock, optionally waiting for a holder to
        release it; acquirefn runs once held, releasefn on release'''
        try:
            # first try a non-blocking acquire
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
879 879
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-enter the already-held lock
            l.lock()
            return l

        def unlock():
            # flush store and refresh filecache entries (except dirstate,
            # which is handled by wlock) when the lock is released
            self.store.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
900 900
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the already-held lock
            l.lock()
            return l

        def unlock():
            # persist the dirstate and refresh its filecache entry when
            # the working-directory lock is released
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
921 921
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        parents' manifests; linkrev is the changelog revision this file
        revision will be linked to; tr is the transaction; changed file
        names are appended to changelist as a side effect.

        Returns the new filelog node, or the first-parent node when only
        flags changed (or nothing changed).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        # fparent2o remembers the original second parent for the
        # flags-only-change test at the bottom
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                # copy source vanished from all ancestors: commit without
                # copy metadata rather than aborting
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1001 1001
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None if there was nothing to
        commit. Raises util.Abort on user errors (partial merge commit,
        unresolved conflicts, unmatched explicit files, ...).

        NOTE(review): extra={} is a mutable default argument; it is only
        read here (extra.get, passed to workingctx), never mutated, so
        this is safe but fragile -- confirm before relying on it.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories and abort on bad files so we can
            # later verify that every explicit pattern matched something
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # changes is the status tuple:
            # (modified, added, removed, deleted, unknown, ignored, clean)
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(
                        _("can't commit subrepos without .hgsub"))
                # make sure .hgsubstate is committed alongside the subrepos
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit: bail out early (unless forced, closing a
            # branch head, merging, or changing branch)
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                      "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # point the user at the saved message before re-raising
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1143 1143
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        If error is True, an IOError while committing a file is always
        fatal; otherwise a missing file (ENOENT) is treated as removed.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}          # fname -> new filelog node
                changed = []      # files whose contents actually changed
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a vanished file (ENOENT) becomes a removal unless
                        # the caller asked for strict error handling
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no files changed: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lazily write pending data so pretxncommit hooks can see it
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1217 1217
1218 1218 def destroyed(self):
1219 1219 '''Inform the repository that nodes have been destroyed.
1220 1220 Intended for use by strip and rollback, so there's a common
1221 1221 place for anything that has to be done after destroying history.'''
1222 1222 # XXX it might be nice if we could take the list of destroyed
1223 1223 # nodes, but I don't see an easy way for rollback() to do that
1224 1224
1225 1225 # Ensure the persistent tag cache is updated. Doing it now
1226 1226 # means that the tag cache only has to worry about destroyed
1227 1227 # heads immediately after a strip/rollback. That in turn
1228 1228 # guarantees that "cachetip == currenttip" (comparing both rev
1229 1229 # and node) always means no nodes have been added or destroyed.
1230 1230
1231 1231 # XXX this is suboptimal when qrefresh'ing: we strip the current
1232 1232 # head, refresh the tag cache, then immediately add a new head.
1233 1233 # But I think doing it this way is necessary for the "instant
1234 1234 # tag cache retrieval" case to work.
1235 1235 self.invalidatecaches()
1236 1236
1237 1237 def walk(self, match, node=None):
1238 1238 '''
1239 1239 walk recursively through the directory tree or a given
1240 1240 changeset, finding all files matched by the match
1241 1241 function
1242 1242 '''
1243 1243 return self[node].walk(match)
1244 1244
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn about files absent from ctx1; others are
                # reported by the normal status machinery
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # cmp holds files the dirstate could not decide about
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk mf2, classifying each file against mf1; whatever is
            # left in mf1 afterwards was removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # merge subrepo results in, prefixed with the subpath
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1375 1375
1376 1376 def heads(self, start=None):
1377 1377 heads = self.changelog.heads(start)
1378 1378 # sort the output in rev descending order
1379 1379 return sorted(heads, key=self.changelog.rev, reverse=True)
1380 1380
1381 1381 def branchheads(self, branch=None, start=None, closed=False):
1382 1382 '''return a (possibly filtered) list of heads for the given branch
1383 1383
1384 1384 Heads are returned in topological order, from newest to oldest.
1385 1385 If branch is None, use the dirstate branch.
1386 1386 If start is not None, return only heads reachable from start.
1387 1387 If closed is True, return heads that are marked as closed as well.
1388 1388 '''
1389 1389 if branch is None:
1390 1390 branch = self[None].branch()
1391 1391 branches = self.branchmap()
1392 1392 if branch not in branches:
1393 1393 return []
1394 1394 # the cache returns heads ordered lowest to highest
1395 1395 bheads = list(reversed(branches[branch]))
1396 1396 if start is not None:
1397 1397 # filter out the heads that cannot be reached from startrev
1398 1398 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1399 1399 bheads = [h for h in bheads if h in fbheads]
1400 1400 if not closed:
1401 1401 bheads = [h for h in bheads if
1402 1402 ('close' not in self.changelog.read(h)[5])]
1403 1403 return bheads
1404 1404
1405 1405 def branches(self, nodes):
1406 1406 if not nodes:
1407 1407 nodes = [self.changelog.tip()]
1408 1408 b = []
1409 1409 for n in nodes:
1410 1410 t = n
1411 1411 while True:
1412 1412 p = self.changelog.parents(n)
1413 1413 if p[1] != nullid or p[0] == nullid:
1414 1414 b.append((t, n, p[0], p[1]))
1415 1415 break
1416 1416 n = p[0]
1417 1417 return b
1418 1418
1419 1419 def between(self, pairs):
1420 1420 r = []
1421 1421
1422 1422 for top, bottom in pairs:
1423 1423 n, l, i = top, [], 0
1424 1424 f = 1
1425 1425
1426 1426 while n != bottom and n != nullid:
1427 1427 p = self.changelog.parents(n)[0]
1428 1428 if i == f:
1429 1429 l.append(n)
1430 1430 f = f * 2
1431 1431 n = p
1432 1432 i += 1
1433 1433
1434 1434 r.append(l)
1435 1435
1436 1436 return r
1437 1437
    def pull(self, remote, heads=None, force=False):
        """Pull changes from remote (optionally limited to heads) into
        this repository.

        Returns the integer result of addchangegroup(), or 0 when there
        was nothing to fetch.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the best protocol the remote supports, newest first
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                      "other repository doesn't support "
                                      "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # addchangegroup takes ownership of lock and releases it
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
1471 1471
1472 1472 def checkpush(self, force, revs):
1473 1473 """Extensions can override this function if additional checks have
1474 1474 to be performed before pushing, or call it if they override push
1475 1475 command.
1476 1476 """
1477 1477 pass
1478 1478
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            # cg is None when prepush refused/found nothing; keep its
            # head report as the return value in that case
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        # propagate any local bookmarks whose remote counterpart is an
        # ancestor of the local position
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1540 1540
1541 1541 def changegroupinfo(self, nodes, source):
1542 1542 if self.ui.verbose or source == 'bundle':
1543 1543 self.ui.status(_("%d changesets found\n") % len(nodes))
1544 1544 if self.ui.debugflag:
1545 1545 self.ui.debug("list of changesets:\n")
1546 1546 for node in nodes:
1547 1547 self.ui.debug("%s\n" % hex(node))
1548 1548
1549 1549 def changegroupsubset(self, bases, heads, source):
1550 1550 """Compute a changegroup consisting of all the nodes that are
1551 1551 descendants of any of the bases and ancestors of any of the heads.
1552 1552 Return a chunkbuffer object whose read() method will return
1553 1553 successive changegroup chunks.
1554 1554
1555 1555 It is fairly complex as determining which filenodes and which
1556 1556 manifest nodes need to be included for the changeset to be complete
1557 1557 is non-trivial.
1558 1558
1559 1559 Another wrinkle is doing the reverse, figuring out which changeset in
1560 1560 the changegroup a particular filenode or manifestnode belongs to.
1561 1561 """
1562 1562 cl = self.changelog
1563 1563 if not bases:
1564 1564 bases = [nullid]
1565 1565 csets, bases, heads = cl.nodesbetween(bases, heads)
1566 1566 # We assume that all ancestors of bases are known
1567 1567 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1568 1568 return self._changegroupsubset(common, csets, heads, source)
1569 1569
1570 1570 def getbundle(self, source, heads=None, common=None):
1571 1571 """Like changegroupsubset, but returns the set difference between the
1572 1572 ancestors of heads and the ancestors common.
1573 1573
1574 1574 If heads is None, use the local heads. If common is None, use [nullid].
1575 1575
1576 1576 The nodes in common might not all be known locally due to the way the
1577 1577 current discovery protocol works.
1578 1578 """
1579 1579 cl = self.changelog
1580 1580 if common:
1581 1581 nm = cl.nodemap
1582 1582 common = [n for n in common if n in nm]
1583 1583 else:
1584 1584 common = [nullid]
1585 1585 if not heads:
1586 1586 heads = cl.heads()
1587 1587 common, missing = cl.findcommonmissing(common, heads)
1588 1588 if not missing:
1589 1589 return None
1590 1590 return self._changegroupsubset(common, missing, heads, source)
1591 1591
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup for csets, assuming the receiver already
        has every revision whose linkrev is in commonrevs.

        Returns an unbundle10 object streaming the changelog, manifest
        and filelog groups.  heads is sorted in place.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # mutable cells shared with the closures below:
        # fstate = [current filename, {filenode -> linked changelog node}]
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        # called back by the bundler for every node: returns the linked
        # changelog node and, as a side effect, records what manifests /
        # filenodes are needed and reports progress
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1688 1688
1689 1689 def changegroup(self, basenodes, source):
1690 1690 # to avoid a race we use changegroupsubset() (issue1320)
1691 1691 return self.changegroupsubset(basenodes, self.heads(), source)
1692 1692
1693 1693 def _changegroup(self, nodes, source):
1694 1694 """Compute the changegroup of all nodes that we have that a recipient
1695 1695 doesn't. Return a chunkbuffer object whose read() method will return
1696 1696 successive changegroup chunks.
1697 1697
1698 1698 This is much easier than the previous function as we can assume that
1699 1699 the recipient has any changenode we aren't sending them.
1700 1700
1701 1701 nodes is the set of nodes to send"""
1702 1702
1703 1703 cl = self.changelog
1704 1704 mf = self.manifest
1705 1705 mfs = {}
1706 1706 changedfiles = set()
1707 1707 fstate = ['']
1708 1708 count = [0]
1709 1709
1710 1710 self.hook('preoutgoing', throw=True, source=source)
1711 1711 self.changegroupinfo(nodes, source)
1712 1712
1713 1713 revset = set([cl.rev(n) for n in nodes])
1714 1714
1715 1715 def gennodelst(log):
1716 1716 return [log.node(r) for r in log if log.linkrev(r) in revset]
1717 1717
1718 1718 def lookup(revlog, x):
1719 1719 if revlog == cl:
1720 1720 c = cl.read(x)
1721 1721 changedfiles.update(c[3])
1722 1722 mfs.setdefault(c[0], x)
1723 1723 count[0] += 1
1724 1724 self.ui.progress(_('bundling'), count[0],
1725 1725 unit=_('changesets'), total=len(nodes))
1726 1726 return x
1727 1727 elif revlog == mf:
1728 1728 count[0] += 1
1729 1729 self.ui.progress(_('bundling'), count[0],
1730 1730 unit=_('manifests'), total=len(mfs))
1731 1731 return cl.node(revlog.linkrev(revlog.rev(x)))
1732 1732 else:
1733 1733 self.ui.progress(
1734 1734 _('bundling'), count[0], item=fstate[0],
1735 1735 total=len(changedfiles), unit=_('files'))
1736 1736 return cl.node(revlog.linkrev(revlog.rev(x)))
1737 1737
1738 1738 bundler = changegroup.bundle10(lookup)
1739 1739 reorder = self.ui.config('bundle', 'reorder', 'auto')
1740 1740 if reorder == 'auto':
1741 1741 reorder = None
1742 1742 else:
1743 1743 reorder = util.parsebool(reorder)
1744 1744
1745 1745 def gengroup():
1746 1746 '''yield a sequence of changegroup chunks (strings)'''
1747 1747 # construct a list of all changed files
1748 1748
1749 1749 for chunk in cl.group(nodes, bundler, reorder=reorder):
1750 1750 yield chunk
1751 1751 self.ui.progress(_('bundling'), None)
1752 1752
1753 1753 count[0] = 0
1754 1754 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1755 1755 yield chunk
1756 1756 self.ui.progress(_('bundling'), None)
1757 1757
1758 1758 count[0] = 0
1759 1759 for fname in sorted(changedfiles):
1760 1760 filerevlog = self.file(fname)
1761 1761 if not len(filerevlog):
1762 1762 raise util.Abort(_("empty or missing revlog for %s") % fname)
1763 1763 fstate[0] = fname
1764 1764 nodelist = gennodelst(filerevlog)
1765 1765 if nodelist:
1766 1766 count[0] += 1
1767 1767 yield bundler.fileheader(fname)
1768 1768 for chunk in filerevlog.group(nodelist, bundler, reorder):
1769 1769 yield chunk
1770 1770 yield bundler.close()
1771 1771 self.ui.progress(_('bundling'), None)
1772 1772
1773 1773 if nodes:
1774 1774 self.hook('outgoing', node=hex(nodes[0]), source=source)
1775 1775
1776 1776 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1777 1777
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # csmap/revmap are the link-revision callbacks handed to the
        # revlogs' addgroup(): csmap also emits a debug line per changeset.
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        # may raise and abort the whole operation before anything is written
        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # minimal callable progress reporter handed to the bundle source
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            # efiles collapses from the set of touched files to its size;
            # used below as the total for the file-progress bar
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                # cross off file nodes we actually received
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything still listed in needfiles must already exist locally,
            # otherwise the incoming data was incomplete
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # dh = net change in head count (closed branch heads don't count)
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                # lazily write pending data so the hook can see it; the
                # "and/or" form is the pre-ternary conditional idiom
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
1950 1950
    def stream_in(self, remote, requirements):
        """Copy a raw snapshot of the remote store into this repository.

        Uses the 'stream_out' wire protocol: a numeric status line, then a
        "<file count> <total bytes>" line, then per-file "<name>\\0<size>"
        headers followed by raw file data.  Adopts the remote's
        format-related *requirements* and returns len(self.heads()) + 1
        (always nonzero).
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            # first line: numeric status code
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # second line: "<total files> <total bytes>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against a zero/negative clock delta before dividing
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            # drop all cached state; the store just changed underneath us
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2009 2009
2010 2010 def clone(self, remote, heads=[], stream=False):
2011 2011 '''clone remote repository.
2012 2012
2013 2013 keyword arguments:
2014 2014 heads: list of revs to clone (forces use of pull)
2015 2015 stream: use streaming clone if possible'''
2016 2016
2017 2017 # now, all clients that can request uncompressed clones can
2018 2018 # read repo formats supported by all servers that can serve
2019 2019 # them.
2020 2020
2021 2021 # if revlog format changes, client will have to check version
2022 2022 # and format flags on "stream" capability, and use
2023 2023 # uncompressed only if compatible.
2024 2024
2025 2025 if stream and not heads:
2026 2026 # 'stream' means remote revlog format is revlogv1 only
2027 2027 if remote.capable('stream'):
2028 2028 return self.stream_in(remote, set(('revlogv1',)))
2029 2029 # otherwise, 'streamreqs' contains the remote revlog format
2030 2030 streamreqs = remote.capable('streamreqs')
2031 2031 if streamreqs:
2032 2032 streamreqs = set(streamreqs.split(','))
2033 2033 # if we support it, stream in and adjust our requirements
2034 2034 if not streamreqs - self.supportedformats:
2035 2035 return self.stream_in(remote, streamreqs)
2036 2036 return self.pull(remote, heads)
2037 2037
2038 2038 def pushkey(self, namespace, key, old, new):
2039 2039 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2040 2040 old=old, new=new)
2041 2041 ret = pushkey.push(self, namespace, key, old, new)
2042 2042 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2043 2043 ret=ret)
2044 2044 return ret
2045 2045
2046 2046 def listkeys(self, namespace):
2047 2047 self.hook('prelistkeys', throw=True, namespace=namespace)
2048 2048 values = pushkey.list(self, namespace)
2049 2049 self.hook('listkeys', namespace=namespace, values=values)
2050 2050 return values
2051 2051
2052 2052 def debugwireargs(self, one, two, three=None, four=None, five=None):
2053 2053 '''used to test argument passing over the wire'''
2054 2054 return "%s %s %s %s %s" % (one, two, three, four, five)
2055 2055
2056 2056 def savecommitmessage(self, text):
2057 2057 fp = self.opener('last-message.txt', 'wb')
2058 2058 try:
2059 2059 fp.write(text)
2060 2060 finally:
2061 2061 fp.close()
2062 2062 return self.pathto(fp.name[len(self.root)+1:])
2063 2063
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the (src, dest) renames in *files*.

    The pairs are copied into a plain list of tuples up front so the
    returned closure holds no reference back to the caller's structures.
    """
    pairs = [tuple(entry) for entry in files]
    def renameall():
        for source, target in pairs:
            util.rename(source, target)
    return renameall
2071 2071
def undoname(fn):
    """Map a journal file path to the matching undo file path.

    E.g. '.hg/journal.dirstate' -> '.hg/undo.dirstate'.  The basename is
    required to start with 'journal'.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    undofile = basename.replace('journal', 'undo', 1)
    return os.path.join(directory, undofile)
2076 2076
def instance(ui, path, create):
    '''module entry point: open/create a localrepository at *path*'''
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2079 2079
def islocal(path):
    '''a localrepository is always local; *path* is ignored'''
    return True
@@ -1,182 +1,183 b''
1 1 setup repo
2 2 $ hg init t
3 3 $ cd t
4 4 $ echo a > a
5 5 $ hg commit -Am'add a'
6 6 adding a
7 7 $ hg verify
8 8 checking changesets
9 9 checking manifests
10 10 crosschecking files in changesets and manifests
11 11 checking files
12 12 1 files, 1 changesets, 1 total revisions
13 13 $ hg parents
14 14 changeset: 0:1f0dee641bb7
15 15 tag: tip
16 16 user: test
17 17 date: Thu Jan 01 00:00:00 1970 +0000
18 18 summary: add a
19 19
20 20
21 21 rollback to null revision
22 22 $ hg status
23 23 $ hg rollback
24 24 repository tip rolled back to revision -1 (undo commit)
25 25 working directory now based on revision -1
26 26 $ hg verify
27 27 checking changesets
28 28 checking manifests
29 29 crosschecking files in changesets and manifests
30 30 checking files
31 31 0 files, 0 changesets, 0 total revisions
32 32 $ hg parents
33 33 $ hg status
34 34 A a
35 35
36 36 Two changesets this time so we rollback to a real changeset
37 37 $ hg commit -m'add a again'
38 38 $ echo a >> a
39 39 $ hg commit -m'modify a'
40 40
41 41 Test issue 902 (current branch is preserved)
42 42 $ hg branch test
43 43 marked working directory as branch test
44 44 $ hg rollback
45 45 repository tip rolled back to revision 0 (undo commit)
46 46 working directory now based on revision 0
47 47 $ hg branch
48 48 default
49 49
50 50 Test issue 1635 (commit message saved)
51 51 $ cat .hg/last-message.txt ; echo
52 52 modify a
53 53
54 54 Test rollback of hg before issue 902 was fixed
55 55
56 56 $ hg commit -m "test3"
57 57 $ hg branch test
58 58 marked working directory as branch test
59 59 $ rm .hg/undo.branch
60 60 $ hg rollback
61 61 repository tip rolled back to revision 0 (undo commit)
62 62 named branch could not be reset: current branch is still 'test'
63 63 working directory now based on revision 0
64 64 $ hg branch
65 65 test
66 66
67 67 working dir unaffected by rollback: do not restore dirstate et al.
68 68 $ hg log --template '{rev} {branch} {desc|firstline}\n'
69 69 0 default add a again
70 70 $ hg status
71 71 M a
72 72 $ hg bookmark foo
73 73 $ hg commit -m'modify a again'
74 74 $ echo b > b
75 75 $ hg commit -Am'add b'
76 76 adding b
77 77 $ hg log --template '{rev} {branch} {desc|firstline}\n'
78 78 2 test add b
79 79 1 test modify a again
80 80 0 default add a again
81 81 $ hg update default
82 82 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
83 83 $ hg bookmark bar
84 84 $ cat .hg/undo.branch ; echo
85 85 test
86 86 $ hg rollback -f
87 87 repository tip rolled back to revision 1 (undo commit)
88 88 $ hg id -n
89 89 0
90 90 $ hg branch
91 91 default
92 92 $ cat .hg/bookmarks.current ; echo
93 93 bar
94 94 $ hg bookmark --delete foo
95 95
96 96 rollback by pretxncommit saves commit message (issue 1635)
97 97
98 98 $ echo a >> a
99 99 $ hg --config hooks.pretxncommit=false commit -m"precious commit message"
100 100 transaction abort!
101 101 rollback completed
102 102 abort: pretxncommit hook exited with status * (glob)
103 103 [255]
104 104 $ cat .hg/last-message.txt ; echo
105 105 precious commit message
106 106
107 107 same thing, but run $EDITOR
108 108
109 109 $ cat > editor << '__EOF__'
110 110 > #!/bin/sh
111 111 > echo "another precious commit message" > "$1"
112 112 > __EOF__
113 113 $ chmod +x editor
114 114 $ HGEDITOR="'`pwd`'"/editor hg --config hooks.pretxncommit=false commit 2>&1
115 115 transaction abort!
116 116 rollback completed
117 117 note: commit message saved in .hg/last-message.txt
118 118 abort: pretxncommit hook exited with status * (glob)
119 119 [255]
120 120 $ cat .hg/last-message.txt
121 121 another precious commit message
122 122
123 123 test rollback on served repository
124 124
125 125 $ hg commit -m "precious commit message"
126 126 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
127 127 $ cat hg.pid >> $DAEMON_PIDS
128 128 $ cd ..
129 129 $ hg clone http://localhost:$HGPORT u
130 130 requesting all changes
131 131 adding changesets
132 132 adding manifests
133 133 adding file changes
134 134 added 3 changesets with 2 changes to 1 files (+1 heads)
135 135 updating to branch default
136 136 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
137 137 $ cd u
138 138 $ hg id default
139 139 068774709090
140 140
141 141 now rollback and observe that 'hg serve' reloads the repository and
142 142 presents the correct tip changeset:
143 143
144 144 $ hg -R ../t rollback
145 145 repository tip rolled back to revision 1 (undo commit)
146 146 working directory now based on revision 0
147 147 $ hg id default
148 148 791dd2169706
149 149
150 150 update to older changeset and then refuse rollback, because
151 151 that would lose data (issue2998)
152 152 $ cd ../t
153 153 $ hg -q update
154 154 $ rm `hg status -un`
155 155 $ template='{rev}:{node|short} [{branch}] {desc|firstline}\n'
156 156 $ echo 'valuable new file' > b
157 157 $ echo 'valuable modification' >> a
158 158 $ hg commit -A -m'a valuable change'
159 159 adding b
160 160 $ hg update 0
161 161 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
162 162 $ hg rollback
163 abort: rollback of last commit while not checked out may lose data (use -f to force)
163 abort: rollback of last commit while not checked out may lose data
164 (use -f to force)
164 165 [255]
165 166 $ hg tip -q
166 167 2:4d9cd3795eea
167 168 $ hg rollback -f
168 169 repository tip rolled back to revision 1 (undo commit)
169 170 $ hg status
170 171 $ hg log --removed b # yep, it's gone
171 172
172 173 same again, but emulate an old client that doesn't write undo.desc
173 174 $ hg -q update
174 175 $ echo 'valuable modification redux' >> a
175 176 $ hg commit -m'a valuable change redux'
176 177 $ rm .hg/undo.desc
177 178 $ hg update 0
178 179 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
179 180 $ hg rollback
180 181 rolling back unknown transaction
181 182 $ cat a
182 183 a
General Comments 0
You need to be logged in to leave comments. Login now