localrepo: add a cache with stat info for files under .hg/
Idan Kamara
r14929:4bf9493e default
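This changeset gives localrepository a `_filecache` dict mapping property names to `util.filecacheentry` objects, so that a filecache decorator (defined in Mercurial's util/scmutil, not shown in this hunk) can invalidate a cached property when its backing file under .hg/ changes on disk. As a rough, self-contained sketch of that mechanism (the `filecacheentry` and `filecache` bodies below are illustrative assumptions, not Mercurial's actual implementation):

import os

class filecacheentry(object):
    # Remember a file's stat info; changed() reports whether the file
    # was modified since the entry was created or last checked.
    def __init__(self, path):
        self.path = path
        self.cachestat = self._stat()

    def _stat(self):
        try:
            st = os.stat(self.path)
            return (st.st_mtime, st.st_size, st.st_ino)
        except OSError:
            return None  # file missing counts as a distinct state

    def changed(self):
        newstat = self._stat()
        if newstat == self.cachestat:
            return False
        self.cachestat = newstat
        return True

class filecache(object):
    # Property-like descriptor: recompute the wrapped function only when
    # the named file under .hg/ changes, storing (entry, value) pairs in
    # the owner's _filecache dict (the dict added by this changeset).
    def __init__(self, path):
        self.path = path

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        cached = obj._filecache.get(self.name)
        if cached is None or cached[0].changed():
            fullpath = os.path.join(obj.path, self.path)  # obj.path is .hg/
            cached = (filecacheentry(fullpath), self.func(obj))
            obj._filecache[self.name] = cached
        return cached[1]

A hypothetical repository object with `path` pointing at .hg/ and an empty `_filecache` dict could then declare, e.g., `bookmarks = filecache('bookmarks')(readbookmarks)`; repeated attribute reads would hit the cache until .hg/bookmarks changes on disk (mtime/size/inode in this sketch).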
@@ -1,2003 +1,2009 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 23 'known', 'getbundle'))
24 24 supportedformats = set(('revlogv1', 'generaldelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=False):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 34 self.opener = scmutil.opener(self.path)
35 35 self.wopener = scmutil.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 util.makedir(self.path, notindexed=True)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener.append(
60 60 "00changelog.i",
61 61 '\0\0\0\2' # represents revlogv2
62 62 ' dummy changelog to prevent using the old repo layout'
63 63 )
64 64 if self.ui.configbool('format', 'generaldelta', False):
65 65 requirements.append("generaldelta")
66 66 requirements = set(requirements)
67 67 else:
68 68 raise error.RepoError(_("repository %s not found") % path)
69 69 elif create:
70 70 raise error.RepoError(_("repository %s already exists") % path)
71 71 else:
72 72 try:
73 73 requirements = scmutil.readrequires(self.opener, self.supported)
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 requirements = set()
78 78
79 79 self.sharedpath = self.path
80 80 try:
81 81 s = os.path.realpath(self.opener.read("sharedpath"))
82 82 if not os.path.exists(s):
83 83 raise error.RepoError(
84 84 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 85 self.sharedpath = s
86 86 except IOError, inst:
87 87 if inst.errno != errno.ENOENT:
88 88 raise
89 89
90 90 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
91 91 self.spath = self.store.path
92 92 self.sopener = self.store.opener
93 93 self.sjoin = self.store.join
94 94 self.opener.createmode = self.store.createmode
95 95 self._applyrequirements(requirements)
96 96 if create:
97 97 self._writerequirements()
98 98
99 99 # These two define the set of tags for this repository. _tags
100 100 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 101 # 'local'. (Global tags are defined by .hgtags across all
102 102 # heads, and local tags are defined in .hg/localtags.) They
103 103 # constitute the in-memory cache of tags.
104 104 self._tags = None
105 105 self._tagtypes = None
106 106
107 107 self._branchcache = None
108 108 self._branchcachetip = None
109 109 self.nodetagscache = None
110 110 self.filterpats = {}
111 111 self._datafilters = {}
112 112 self._transref = self._lockref = self._wlockref = None
113 113
114 # A cache for various files under .hg/ that tracks file changes,
115 # (used by the filecache decorator)
116 #
117 # Maps a property name to its util.filecacheentry
118 self._filecache = {}
119
114 120 def _applyrequirements(self, requirements):
115 121 self.requirements = requirements
116 122 openerreqs = set(('revlogv1', 'generaldelta'))
117 123 self.sopener.options = dict((r, 1) for r in requirements
118 124 if r in openerreqs)
119 125
120 126 def _writerequirements(self):
121 127 reqfile = self.opener("requires", "w")
122 128 for r in self.requirements:
123 129 reqfile.write("%s\n" % r)
124 130 reqfile.close()
125 131
126 132 def _checknested(self, path):
127 133 """Determine if path is a legal nested repository."""
128 134 if not path.startswith(self.root):
129 135 return False
130 136 subpath = path[len(self.root) + 1:]
131 137
132 138 # XXX: Checking against the current working copy is wrong in
133 139 # the sense that it can reject things like
134 140 #
135 141 # $ hg cat -r 10 sub/x.txt
136 142 #
137 143 # if sub/ is no longer a subrepository in the working copy
138 144 # parent revision.
139 145 #
140 146 # However, it can of course also allow things that would have
141 147 # been rejected before, such as the above cat command if sub/
142 148 # is a subrepository now, but was a normal directory before.
143 149 # The old path auditor would have rejected by mistake since it
144 150 # panics when it sees sub/.hg/.
145 151 #
146 152 # All in all, checking against the working copy seems sensible
147 153 # since we want to prevent access to nested repositories on
148 154 # the filesystem *now*.
149 155 ctx = self[None]
150 156 parts = util.splitpath(subpath)
151 157 while parts:
152 158 prefix = os.sep.join(parts)
153 159 if prefix in ctx.substate:
154 160 if prefix == subpath:
155 161 return True
156 162 else:
157 163 sub = ctx.sub(prefix)
158 164 return sub.checknested(subpath[len(prefix) + 1:])
159 165 else:
160 166 parts.pop()
161 167 return False
162 168
163 169 @util.propertycache
164 170 def _bookmarks(self):
165 171 return bookmarks.read(self)
166 172
167 173 @util.propertycache
168 174 def _bookmarkcurrent(self):
169 175 return bookmarks.readcurrent(self)
170 176
171 177 @propertycache
172 178 def changelog(self):
173 179 c = changelog.changelog(self.sopener)
174 180 if 'HG_PENDING' in os.environ:
175 181 p = os.environ['HG_PENDING']
176 182 if p.startswith(self.root):
177 183 c.readpending('00changelog.i.a')
178 184 return c
179 185
180 186 @propertycache
181 187 def manifest(self):
182 188 return manifest.manifest(self.sopener)
183 189
184 190 @propertycache
185 191 def dirstate(self):
186 192 warned = [0]
187 193 def validate(node):
188 194 try:
189 195 self.changelog.rev(node)
190 196 return node
191 197 except error.LookupError:
192 198 if not warned[0]:
193 199 warned[0] = True
194 200 self.ui.warn(_("warning: ignoring unknown"
195 201 " working parent %s!\n") % short(node))
196 202 return nullid
197 203
198 204 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
199 205
200 206 def __getitem__(self, changeid):
201 207 if changeid is None:
202 208 return context.workingctx(self)
203 209 return context.changectx(self, changeid)
204 210
205 211 def __contains__(self, changeid):
206 212 try:
207 213 return bool(self.lookup(changeid))
208 214 except error.RepoLookupError:
209 215 return False
210 216
211 217 def __nonzero__(self):
212 218 return True
213 219
214 220 def __len__(self):
215 221 return len(self.changelog)
216 222
217 223 def __iter__(self):
218 224 for i in xrange(len(self)):
219 225 yield i
220 226
221 227 def set(self, expr, *args):
222 228 '''
223 229 Yield a context for each matching revision, after doing arg
224 230 replacement via revset.formatspec
225 231 '''
226 232
227 233 expr = revset.formatspec(expr, *args)
228 234 m = revset.match(None, expr)
229 235 for r in m(self, range(len(self))):
230 236 yield self[r]
231 237
232 238 def url(self):
233 239 return 'file:' + self.root
234 240
235 241 def hook(self, name, throw=False, **args):
236 242 return hook.hook(self.ui, self, name, throw, **args)
237 243
238 244 tag_disallowed = ':\r\n'
239 245
240 246 def _tag(self, names, node, message, local, user, date, extra={}):
241 247 if isinstance(names, str):
242 248 allchars = names
243 249 names = (names,)
244 250 else:
245 251 allchars = ''.join(names)
246 252 for c in self.tag_disallowed:
247 253 if c in allchars:
248 254 raise util.Abort(_('%r cannot be used in a tag name') % c)
249 255
250 256 branches = self.branchmap()
251 257 for name in names:
252 258 self.hook('pretag', throw=True, node=hex(node), tag=name,
253 259 local=local)
254 260 if name in branches:
255 261 self.ui.warn(_("warning: tag %s conflicts with existing"
256 262 " branch name\n") % name)
257 263
258 264 def writetags(fp, names, munge, prevtags):
259 265 fp.seek(0, 2)
260 266 if prevtags and prevtags[-1] != '\n':
261 267 fp.write('\n')
262 268 for name in names:
263 269 m = munge and munge(name) or name
264 270 if self._tagtypes and name in self._tagtypes:
265 271 old = self._tags.get(name, nullid)
266 272 fp.write('%s %s\n' % (hex(old), m))
267 273 fp.write('%s %s\n' % (hex(node), m))
268 274 fp.close()
269 275
270 276 prevtags = ''
271 277 if local:
272 278 try:
273 279 fp = self.opener('localtags', 'r+')
274 280 except IOError:
275 281 fp = self.opener('localtags', 'a')
276 282 else:
277 283 prevtags = fp.read()
278 284
279 285 # local tags are stored in the current charset
280 286 writetags(fp, names, None, prevtags)
281 287 for name in names:
282 288 self.hook('tag', node=hex(node), tag=name, local=local)
283 289 return
284 290
285 291 try:
286 292 fp = self.wfile('.hgtags', 'rb+')
287 293 except IOError, e:
288 294 if e.errno != errno.ENOENT:
289 295 raise
290 296 fp = self.wfile('.hgtags', 'ab')
291 297 else:
292 298 prevtags = fp.read()
293 299
294 300 # committed tags are stored in UTF-8
295 301 writetags(fp, names, encoding.fromlocal, prevtags)
296 302
297 303 fp.close()
298 304
299 305 if '.hgtags' not in self.dirstate:
300 306 self[None].add(['.hgtags'])
301 307
302 308 m = matchmod.exact(self.root, '', ['.hgtags'])
303 309 tagnode = self.commit(message, user, date, extra=extra, match=m)
304 310
305 311 for name in names:
306 312 self.hook('tag', node=hex(node), tag=name, local=local)
307 313
308 314 return tagnode
309 315
310 316 def tag(self, names, node, message, local, user, date):
311 317 '''tag a revision with one or more symbolic names.
312 318
313 319 names is a list of strings or, when adding a single tag, names may be a
314 320 string.
315 321
316 322 if local is True, the tags are stored in a per-repository file.
317 323 otherwise, they are stored in the .hgtags file, and a new
318 324 changeset is committed with the change.
319 325
320 326 keyword arguments:
321 327
322 328 local: whether to store tags in non-version-controlled file
323 329 (default False)
324 330
325 331 message: commit message to use if committing
326 332
327 333 user: name of user to use if committing
328 334
329 335 date: date tuple to use if committing'''
330 336
331 337 if not local:
332 338 for x in self.status()[:5]:
333 339 if '.hgtags' in x:
334 340 raise util.Abort(_('working copy of .hgtags is changed '
335 341 '(please commit .hgtags manually)'))
336 342
337 343 self.tags() # instantiate the cache
338 344 self._tag(names, node, message, local, user, date)
339 345
340 346 def tags(self):
341 347 '''return a mapping of tag to node'''
342 348 if self._tags is None:
343 349 (self._tags, self._tagtypes) = self._findtags()
344 350
345 351 return self._tags
346 352
347 353 def _findtags(self):
348 354 '''Do the hard work of finding tags. Return a pair of dicts
349 355 (tags, tagtypes) where tags maps tag name to node, and tagtypes
350 356 maps tag name to a string like \'global\' or \'local\'.
351 357 Subclasses or extensions are free to add their own tags, but
352 358 should be aware that the returned dicts will be retained for the
353 359 duration of the localrepo object.'''
354 360
355 361 # XXX what tagtype should subclasses/extensions use? Currently
356 362 # mq and bookmarks add tags, but do not set the tagtype at all.
357 363 # Should each extension invent its own tag type? Should there
358 364 # be one tagtype for all such "virtual" tags? Or is the status
359 365 # quo fine?
360 366
361 367 alltags = {} # map tag name to (node, hist)
362 368 tagtypes = {}
363 369
364 370 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
365 371 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
366 372
367 373 # Build the return dicts. Have to re-encode tag names because
368 374 # the tags module always uses UTF-8 (in order not to lose info
369 375 # writing to the cache), but the rest of Mercurial wants them in
370 376 # local encoding.
371 377 tags = {}
372 378 for (name, (node, hist)) in alltags.iteritems():
373 379 if node != nullid:
374 380 try:
375 381 # ignore tags to unknown nodes
376 382 self.changelog.lookup(node)
377 383 tags[encoding.tolocal(name)] = node
378 384 except error.LookupError:
379 385 pass
380 386 tags['tip'] = self.changelog.tip()
381 387 tagtypes = dict([(encoding.tolocal(name), value)
382 388 for (name, value) in tagtypes.iteritems()])
383 389 return (tags, tagtypes)
384 390
385 391 def tagtype(self, tagname):
386 392 '''
387 393 return the type of the given tag. result can be:
388 394
389 395 'local' : a local tag
390 396 'global' : a global tag
391 397 None : tag does not exist
392 398 '''
393 399
394 400 self.tags()
395 401
396 402 return self._tagtypes.get(tagname)
397 403
398 404 def tagslist(self):
399 405 '''return a list of tags ordered by revision'''
400 406 l = []
401 407 for t, n in self.tags().iteritems():
402 408 r = self.changelog.rev(n)
403 409 l.append((r, t, n))
404 410 return [(t, n) for r, t, n in sorted(l)]
405 411
406 412 def nodetags(self, node):
407 413 '''return the tags associated with a node'''
408 414 if not self.nodetagscache:
409 415 self.nodetagscache = {}
410 416 for t, n in self.tags().iteritems():
411 417 self.nodetagscache.setdefault(n, []).append(t)
412 418 for tags in self.nodetagscache.itervalues():
413 419 tags.sort()
414 420 return self.nodetagscache.get(node, [])
415 421
416 422 def nodebookmarks(self, node):
417 423 marks = []
418 424 for bookmark, n in self._bookmarks.iteritems():
419 425 if n == node:
420 426 marks.append(bookmark)
421 427 return sorted(marks)
422 428
423 429 def _branchtags(self, partial, lrev):
424 430 # TODO: rename this function?
425 431 tiprev = len(self) - 1
426 432 if lrev != tiprev:
427 433 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
428 434 self._updatebranchcache(partial, ctxgen)
429 435 self._writebranchcache(partial, self.changelog.tip(), tiprev)
430 436
431 437 return partial
432 438
433 439 def updatebranchcache(self):
434 440 tip = self.changelog.tip()
435 441 if self._branchcache is not None and self._branchcachetip == tip:
436 442 return self._branchcache
437 443
438 444 oldtip = self._branchcachetip
439 445 self._branchcachetip = tip
440 446 if oldtip is None or oldtip not in self.changelog.nodemap:
441 447 partial, last, lrev = self._readbranchcache()
442 448 else:
443 449 lrev = self.changelog.rev(oldtip)
444 450 partial = self._branchcache
445 451
446 452 self._branchtags(partial, lrev)
447 453 # this private cache holds all heads (not just tips)
448 454 self._branchcache = partial
449 455
450 456 def branchmap(self):
451 457 '''returns a dictionary {branch: [branchheads]}'''
452 458 self.updatebranchcache()
453 459 return self._branchcache
454 460
455 461 def branchtags(self):
456 462 '''return a dict where branch names map to the tipmost head of
457 463 the branch, open heads come before closed'''
458 464 bt = {}
459 465 for bn, heads in self.branchmap().iteritems():
460 466 tip = heads[-1]
461 467 for h in reversed(heads):
462 468 if 'close' not in self.changelog.read(h)[5]:
463 469 tip = h
464 470 break
465 471 bt[bn] = tip
466 472 return bt
467 473
468 474 def _readbranchcache(self):
469 475 partial = {}
470 476 try:
471 477 f = self.opener("cache/branchheads")
472 478 lines = f.read().split('\n')
473 479 f.close()
474 480 except (IOError, OSError):
475 481 return {}, nullid, nullrev
476 482
477 483 try:
478 484 last, lrev = lines.pop(0).split(" ", 1)
479 485 last, lrev = bin(last), int(lrev)
480 486 if lrev >= len(self) or self[lrev].node() != last:
481 487 # invalidate the cache
482 488 raise ValueError('invalidating branch cache (tip differs)')
483 489 for l in lines:
484 490 if not l:
485 491 continue
486 492 node, label = l.split(" ", 1)
487 493 label = encoding.tolocal(label.strip())
488 494 partial.setdefault(label, []).append(bin(node))
489 495 except KeyboardInterrupt:
490 496 raise
491 497 except Exception, inst:
492 498 if self.ui.debugflag:
493 499 self.ui.warn(str(inst), '\n')
494 500 partial, last, lrev = {}, nullid, nullrev
495 501 return partial, last, lrev
496 502
497 503 def _writebranchcache(self, branches, tip, tiprev):
498 504 try:
499 505 f = self.opener("cache/branchheads", "w", atomictemp=True)
500 506 f.write("%s %s\n" % (hex(tip), tiprev))
501 507 for label, nodes in branches.iteritems():
502 508 for node in nodes:
503 509 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
504 510 f.rename()
505 511 except (IOError, OSError):
506 512 pass
507 513
508 514 def _updatebranchcache(self, partial, ctxgen):
509 515 # collect new branch entries
510 516 newbranches = {}
511 517 for c in ctxgen:
512 518 newbranches.setdefault(c.branch(), []).append(c.node())
513 519 # if older branchheads are reachable from new ones, they aren't
514 520 # really branchheads. Note checking parents is insufficient:
515 521 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
516 522 for branch, newnodes in newbranches.iteritems():
517 523 bheads = partial.setdefault(branch, [])
518 524 bheads.extend(newnodes)
519 525 if len(bheads) <= 1:
520 526 continue
521 527 bheads = sorted(bheads, key=lambda x: self[x].rev())
522 528 # starting from tip means fewer passes over reachable
523 529 while newnodes:
524 530 latest = newnodes.pop()
525 531 if latest not in bheads:
526 532 continue
527 533 minbhrev = self[bheads[0]].node()
528 534 reachable = self.changelog.reachable(latest, minbhrev)
529 535 reachable.remove(latest)
530 536 if reachable:
531 537 bheads = [b for b in bheads if b not in reachable]
532 538 partial[branch] = bheads
533 539
534 540 def lookup(self, key):
535 541 if isinstance(key, int):
536 542 return self.changelog.node(key)
537 543 elif key == '.':
538 544 return self.dirstate.p1()
539 545 elif key == 'null':
540 546 return nullid
541 547 elif key == 'tip':
542 548 return self.changelog.tip()
543 549 n = self.changelog._match(key)
544 550 if n:
545 551 return n
546 552 if key in self._bookmarks:
547 553 return self._bookmarks[key]
548 554 if key in self.tags():
549 555 return self.tags()[key]
550 556 if key in self.branchtags():
551 557 return self.branchtags()[key]
552 558 n = self.changelog._partialmatch(key)
553 559 if n:
554 560 return n
555 561
556 562 # can't find key, check if it might have come from damaged dirstate
557 563 if key in self.dirstate.parents():
558 564 raise error.Abort(_("working directory has unknown parent '%s'!")
559 565 % short(key))
560 566 try:
561 567 if len(key) == 20:
562 568 key = hex(key)
563 569 except TypeError:
564 570 pass
565 571 raise error.RepoLookupError(_("unknown revision '%s'") % key)
566 572
567 573 def lookupbranch(self, key, remote=None):
568 574 repo = remote or self
569 575 if key in repo.branchmap():
570 576 return key
571 577
572 578 repo = (remote and remote.local()) and remote or self
573 579 return repo[key].branch()
574 580
575 581 def known(self, nodes):
576 582 nm = self.changelog.nodemap
577 583 return [(n in nm) for n in nodes]
578 584
579 585 def local(self):
580 586 return self
581 587
582 588 def join(self, f):
583 589 return os.path.join(self.path, f)
584 590
585 591 def wjoin(self, f):
586 592 return os.path.join(self.root, f)
587 593
588 594 def file(self, f):
589 595 if f[0] == '/':
590 596 f = f[1:]
591 597 return filelog.filelog(self.sopener, f)
592 598
593 599 def changectx(self, changeid):
594 600 return self[changeid]
595 601
596 602 def parents(self, changeid=None):
597 603 '''get list of changectxs for parents of changeid'''
598 604 return self[changeid].parents()
599 605
600 606 def filectx(self, path, changeid=None, fileid=None):
601 607 """changeid can be a changeset revision, node, or tag.
602 608 fileid can be a file revision or node."""
603 609 return context.filectx(self, path, changeid, fileid)
604 610
605 611 def getcwd(self):
606 612 return self.dirstate.getcwd()
607 613
608 614 def pathto(self, f, cwd=None):
609 615 return self.dirstate.pathto(f, cwd)
610 616
611 617 def wfile(self, f, mode='r'):
612 618 return self.wopener(f, mode)
613 619
614 620 def _link(self, f):
615 621 return os.path.islink(self.wjoin(f))
616 622
617 623 def _loadfilter(self, filter):
618 624 if filter not in self.filterpats:
619 625 l = []
620 626 for pat, cmd in self.ui.configitems(filter):
621 627 if cmd == '!':
622 628 continue
623 629 mf = matchmod.match(self.root, '', [pat])
624 630 fn = None
625 631 params = cmd
626 632 for name, filterfn in self._datafilters.iteritems():
627 633 if cmd.startswith(name):
628 634 fn = filterfn
629 635 params = cmd[len(name):].lstrip()
630 636 break
631 637 if not fn:
632 638 fn = lambda s, c, **kwargs: util.filter(s, c)
633 639 # Wrap old filters not supporting keyword arguments
634 640 if not inspect.getargspec(fn)[2]:
635 641 oldfn = fn
636 642 fn = lambda s, c, **kwargs: oldfn(s, c)
637 643 l.append((mf, fn, params))
638 644 self.filterpats[filter] = l
639 645 return self.filterpats[filter]
640 646
641 647 def _filter(self, filterpats, filename, data):
642 648 for mf, fn, cmd in filterpats:
643 649 if mf(filename):
644 650 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
645 651 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
646 652 break
647 653
648 654 return data
649 655
650 656 @propertycache
651 657 def _encodefilterpats(self):
652 658 return self._loadfilter('encode')
653 659
654 660 @propertycache
655 661 def _decodefilterpats(self):
656 662 return self._loadfilter('decode')
657 663
658 664 def adddatafilter(self, name, filter):
659 665 self._datafilters[name] = filter
660 666
661 667 def wread(self, filename):
662 668 if self._link(filename):
663 669 data = os.readlink(self.wjoin(filename))
664 670 else:
665 671 data = self.wopener.read(filename)
666 672 return self._filter(self._encodefilterpats, filename, data)
667 673
668 674 def wwrite(self, filename, data, flags):
669 675 data = self._filter(self._decodefilterpats, filename, data)
670 676 if 'l' in flags:
671 677 self.wopener.symlink(data, filename)
672 678 else:
673 679 self.wopener.write(filename, data)
674 680 if 'x' in flags:
675 681 util.setflags(self.wjoin(filename), False, True)
676 682
677 683 def wwritedata(self, filename, data):
678 684 return self._filter(self._decodefilterpats, filename, data)
679 685
680 686 def transaction(self, desc):
681 687 tr = self._transref and self._transref() or None
682 688 if tr and tr.running():
683 689 return tr.nest()
684 690
685 691 # abort here if the journal already exists
686 692 if os.path.exists(self.sjoin("journal")):
687 693 raise error.RepoError(
688 694 _("abandoned transaction found - run hg recover"))
689 695
690 696 journalfiles = self._writejournal(desc)
691 697 renames = [(x, undoname(x)) for x in journalfiles]
692 698
693 699 tr = transaction.transaction(self.ui.warn, self.sopener,
694 700 self.sjoin("journal"),
695 701 aftertrans(renames),
696 702 self.store.createmode)
697 703 self._transref = weakref.ref(tr)
698 704 return tr
699 705
700 706 def _writejournal(self, desc):
701 707 # save dirstate for rollback
702 708 try:
703 709 ds = self.opener.read("dirstate")
704 710 except IOError:
705 711 ds = ""
706 712 self.opener.write("journal.dirstate", ds)
707 713 self.opener.write("journal.branch",
708 714 encoding.fromlocal(self.dirstate.branch()))
709 715 self.opener.write("journal.desc",
710 716 "%d\n%s\n" % (len(self), desc))
711 717
712 718 bkname = self.join('bookmarks')
713 719 if os.path.exists(bkname):
714 720 util.copyfile(bkname, self.join('journal.bookmarks'))
715 721 else:
716 722 self.opener.write('journal.bookmarks', '')
717 723
718 724 return (self.sjoin('journal'), self.join('journal.dirstate'),
719 725 self.join('journal.branch'), self.join('journal.desc'),
720 726 self.join('journal.bookmarks'))
721 727
722 728 def recover(self):
723 729 lock = self.lock()
724 730 try:
725 731 if os.path.exists(self.sjoin("journal")):
726 732 self.ui.status(_("rolling back interrupted transaction\n"))
727 733 transaction.rollback(self.sopener, self.sjoin("journal"),
728 734 self.ui.warn)
729 735 self.invalidate()
730 736 return True
731 737 else:
732 738 self.ui.warn(_("no interrupted transaction available\n"))
733 739 return False
734 740 finally:
735 741 lock.release()
736 742
737 743 def rollback(self, dryrun=False):
738 744 wlock = lock = None
739 745 try:
740 746 wlock = self.wlock()
741 747 lock = self.lock()
742 748 if os.path.exists(self.sjoin("undo")):
743 749 try:
744 750 args = self.opener.read("undo.desc").splitlines()
745 751 if len(args) >= 3 and self.ui.verbose:
746 752 desc = _("repository tip rolled back to revision %s"
747 753 " (undo %s: %s)\n") % (
748 754 int(args[0]) - 1, args[1], args[2])
749 755 elif len(args) >= 2:
750 756 desc = _("repository tip rolled back to revision %s"
751 757 " (undo %s)\n") % (
752 758 int(args[0]) - 1, args[1])
753 759 except IOError:
754 760 desc = _("rolling back unknown transaction\n")
755 761 self.ui.status(desc)
756 762 if dryrun:
757 763 return
758 764 transaction.rollback(self.sopener, self.sjoin("undo"),
759 765 self.ui.warn)
760 766 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
761 767 if os.path.exists(self.join('undo.bookmarks')):
762 768 util.rename(self.join('undo.bookmarks'),
763 769 self.join('bookmarks'))
764 770 try:
765 771 branch = self.opener.read("undo.branch")
766 772 self.dirstate.setbranch(branch)
767 773 except IOError:
768 774 self.ui.warn(_("named branch could not be reset, "
769 775 "current branch is still: %s\n")
770 776 % self.dirstate.branch())
771 777 self.invalidate()
772 778 self.dirstate.invalidate()
773 779 self.destroyed()
774 780 parents = tuple([p.rev() for p in self.parents()])
775 781 if len(parents) > 1:
776 782 self.ui.status(_("working directory now based on "
777 783 "revisions %d and %d\n") % parents)
778 784 else:
779 785 self.ui.status(_("working directory now based on "
780 786 "revision %d\n") % parents)
781 787 else:
782 788 self.ui.warn(_("no rollback information available\n"))
783 789 return 1
784 790 finally:
785 791 release(lock, wlock)
786 792
787 793 def invalidatecaches(self):
788 794 self._tags = None
789 795 self._tagtypes = None
790 796 self.nodetagscache = None
791 797 self._branchcache = None # in UTF-8
792 798 self._branchcachetip = None
793 799
794 800 def invalidate(self):
795 801 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
796 802 if a in self.__dict__:
797 803 delattr(self, a)
798 804 self.invalidatecaches()
799 805
800 806 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
801 807 try:
802 808 l = lock.lock(lockname, 0, releasefn, desc=desc)
803 809 except error.LockHeld, inst:
804 810 if not wait:
805 811 raise
806 812 self.ui.warn(_("waiting for lock on %s held by %r\n") %
807 813 (desc, inst.locker))
808 814 # default to 600 seconds timeout
809 815 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
810 816 releasefn, desc=desc)
811 817 if acquirefn:
812 818 acquirefn()
813 819 return l
814 820
815 821 def lock(self, wait=True):
816 822 '''Lock the repository store (.hg/store) and return a weak reference
817 823 to the lock. Use this before modifying the store (e.g. committing or
818 824 stripping). If you are opening a transaction, get a lock as well.)'''
819 825 l = self._lockref and self._lockref()
820 826 if l is not None and l.held:
821 827 l.lock()
822 828 return l
823 829
824 830 l = self._lock(self.sjoin("lock"), wait, self.store.write,
825 831 self.invalidate, _('repository %s') % self.origroot)
826 832 self._lockref = weakref.ref(l)
827 833 return l
828 834
829 835 def wlock(self, wait=True):
830 836 '''Lock the non-store parts of the repository (everything under
831 837 .hg except .hg/store) and return a weak reference to the lock.
832 838 Use this before modifying files in .hg.'''
833 839 l = self._wlockref and self._wlockref()
834 840 if l is not None and l.held:
835 841 l.lock()
836 842 return l
837 843
838 844 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
839 845 self.dirstate.invalidate, _('working directory of %s') %
840 846 self.origroot)
841 847 self._wlockref = weakref.ref(l)
842 848 return l
843 849
844 850 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
845 851 """
846 852 commit an individual file as part of a larger transaction
847 853 """
848 854
849 855 fname = fctx.path()
850 856 text = fctx.data()
851 857 flog = self.file(fname)
852 858 fparent1 = manifest1.get(fname, nullid)
853 859 fparent2 = fparent2o = manifest2.get(fname, nullid)
854 860
855 861 meta = {}
856 862 copy = fctx.renamed()
857 863 if copy and copy[0] != fname:
858 864 # Mark the new revision of this file as a copy of another
859 865 # file. This copy data will effectively act as a parent
860 866 # of this new revision. If this is a merge, the first
861 867 # parent will be the nullid (meaning "look up the copy data")
862 868 # and the second one will be the other parent. For example:
863 869 #
864 870 # 0 --- 1 --- 3 rev1 changes file foo
865 871 # \ / rev2 renames foo to bar and changes it
866 872 # \- 2 -/ rev3 should have bar with all changes and
867 873 # should record that bar descends from
868 874 # bar in rev2 and foo in rev1
869 875 #
870 876 # this allows this merge to succeed:
871 877 #
872 878 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
873 879 # \ / merging rev3 and rev4 should use bar@rev2
874 880 # \- 2 --- 4 as the merge base
875 881 #
876 882
877 883 cfname = copy[0]
878 884 crev = manifest1.get(cfname)
879 885 newfparent = fparent2
880 886
881 887 if manifest2: # branch merge
882 888 if fparent2 == nullid or crev is None: # copied on remote side
883 889 if cfname in manifest2:
884 890 crev = manifest2[cfname]
885 891 newfparent = fparent1
886 892
887 893 # find source in nearest ancestor if we've lost track
888 894 if not crev:
889 895 self.ui.debug(" %s: searching for copy revision for %s\n" %
890 896 (fname, cfname))
891 897 for ancestor in self[None].ancestors():
892 898 if cfname in ancestor:
893 899 crev = ancestor[cfname].filenode()
894 900 break
895 901
896 902 if crev:
897 903 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
898 904 meta["copy"] = cfname
899 905 meta["copyrev"] = hex(crev)
900 906 fparent1, fparent2 = nullid, newfparent
901 907 else:
902 908 self.ui.warn(_("warning: can't find ancestor for '%s' "
903 909 "copied from '%s'!\n") % (fname, cfname))
904 910
905 911 elif fparent2 != nullid:
906 912 # is one parent an ancestor of the other?
907 913 fparentancestor = flog.ancestor(fparent1, fparent2)
908 914 if fparentancestor == fparent1:
909 915 fparent1, fparent2 = fparent2, nullid
910 916 elif fparentancestor == fparent2:
911 917 fparent2 = nullid
912 918
913 919 # is the file changed?
914 920 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
915 921 changelist.append(fname)
916 922 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
917 923
918 924 # are just the flags changed during merge?
919 925 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
920 926 changelist.append(fname)
921 927
922 928 return fparent1
923 929
924 930 def commit(self, text="", user=None, date=None, match=None, force=False,
925 931 editor=False, extra={}):
926 932 """Add a new revision to current repository.
927 933
928 934 Revision information is gathered from the working directory,
929 935 match can be used to filter the committed files. If editor is
930 936 supplied, it is called to get a commit message.
931 937 """
932 938
933 939 def fail(f, msg):
934 940 raise util.Abort('%s: %s' % (f, msg))
935 941
936 942 if not match:
937 943 match = matchmod.always(self.root, '')
938 944
939 945 if not force:
940 946 vdirs = []
941 947 match.dir = vdirs.append
942 948 match.bad = fail
943 949
944 950 wlock = self.wlock()
945 951 try:
946 952 wctx = self[None]
947 953 merge = len(wctx.parents()) > 1
948 954
949 955 if (not force and merge and match and
950 956 (match.files() or match.anypats())):
951 957 raise util.Abort(_('cannot partially commit a merge '
952 958 '(do not specify files or patterns)'))
953 959
954 960 changes = self.status(match=match, clean=force)
955 961 if force:
956 962 changes[0].extend(changes[6]) # mq may commit unchanged files
957 963
958 964 # check subrepos
959 965 subs = []
960 966 removedsubs = set()
961 967 if '.hgsub' in wctx:
962 968 # only manage subrepos and .hgsubstate if .hgsub is present
963 969 for p in wctx.parents():
964 970 removedsubs.update(s for s in p.substate if match(s))
965 971 for s in wctx.substate:
966 972 removedsubs.discard(s)
967 973 if match(s) and wctx.sub(s).dirty():
968 974 subs.append(s)
969 975 if (subs or removedsubs):
970 976 if (not match('.hgsub') and
971 977 '.hgsub' in (wctx.modified() + wctx.added())):
972 978 raise util.Abort(
973 979 _("can't commit subrepos without .hgsub"))
974 980 if '.hgsubstate' not in changes[0]:
975 981 changes[0].insert(0, '.hgsubstate')
976 982 if '.hgsubstate' in changes[2]:
977 983 changes[2].remove('.hgsubstate')
978 984 elif '.hgsub' in changes[2]:
979 985 # clean up .hgsubstate when .hgsub is removed
980 986 if ('.hgsubstate' in wctx and
981 987 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
982 988 changes[2].insert(0, '.hgsubstate')
983 989
984 990 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
985 991 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
986 992 if changedsubs:
987 993 raise util.Abort(_("uncommitted changes in subrepo %s")
988 994 % changedsubs[0])
989 995
990 996 # make sure all explicit patterns are matched
991 997 if not force and match.files():
992 998 matched = set(changes[0] + changes[1] + changes[2])
993 999
994 1000 for f in match.files():
995 1001 if f == '.' or f in matched or f in wctx.substate:
996 1002 continue
997 1003 if f in changes[3]: # missing
998 1004 fail(f, _('file not found!'))
999 1005 if f in vdirs: # visited directory
1000 1006 d = f + '/'
1001 1007 for mf in matched:
1002 1008 if mf.startswith(d):
1003 1009 break
1004 1010 else:
1005 1011 fail(f, _("no match under directory!"))
1006 1012 elif f not in self.dirstate:
1007 1013 fail(f, _("file not tracked!"))
1008 1014
1009 1015 if (not force and not extra.get("close") and not merge
1010 1016 and not (changes[0] or changes[1] or changes[2])
1011 1017 and wctx.branch() == wctx.p1().branch()):
1012 1018 return None
1013 1019
1014 1020 ms = mergemod.mergestate(self)
1015 1021 for f in changes[0]:
1016 1022 if f in ms and ms[f] == 'u':
1017 1023 raise util.Abort(_("unresolved merge conflicts "
1018 1024 "(see hg help resolve)"))
1019 1025
1020 1026 cctx = context.workingctx(self, text, user, date, extra, changes)
1021 1027 if editor:
1022 1028 cctx._text = editor(self, cctx, subs)
1023 1029 edited = (text != cctx._text)
1024 1030
1025 1031 # commit subs
1026 1032 if subs or removedsubs:
1027 1033 state = wctx.substate.copy()
1028 1034 for s in sorted(subs):
1029 1035 sub = wctx.sub(s)
1030 1036 self.ui.status(_('committing subrepository %s\n') %
1031 1037 subrepo.subrelpath(sub))
1032 1038 sr = sub.commit(cctx._text, user, date)
1033 1039 state[s] = (state[s][0], sr)
1034 1040 subrepo.writestate(self, state)
1035 1041
1036 1042 # Save commit message in case this transaction gets rolled back
1037 1043 # (e.g. by a pretxncommit hook). Leave the content alone on
1038 1044 # the assumption that the user will use the same editor again.
1039 1045 msgfn = self.savecommitmessage(cctx._text)
1040 1046
1041 1047 p1, p2 = self.dirstate.parents()
1042 1048 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1043 1049 try:
1044 1050 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1045 1051 ret = self.commitctx(cctx, True)
1046 1052 except:
1047 1053 if edited:
1048 1054 self.ui.write(
1049 1055 _('note: commit message saved in %s\n') % msgfn)
1050 1056 raise
1051 1057
1052 1058 # update bookmarks, dirstate and mergestate
1053 1059 bookmarks.update(self, p1, ret)
1054 1060 for f in changes[0] + changes[1]:
1055 1061 self.dirstate.normal(f)
1056 1062 for f in changes[2]:
1057 1063 self.dirstate.drop(f)
1058 1064 self.dirstate.setparents(ret)
1059 1065 ms.reset()
1060 1066 finally:
1061 1067 wlock.release()
1062 1068
1063 1069 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1064 1070 return ret
1065 1071
1066 1072 def commitctx(self, ctx, error=False):
1067 1073 """Add a new revision to current repository.
1068 1074 Revision information is passed via the context argument.
1069 1075 """
1070 1076
1071 1077 tr = lock = None
1072 1078 removed = list(ctx.removed())
1073 1079 p1, p2 = ctx.p1(), ctx.p2()
1074 1080 user = ctx.user()
1075 1081
1076 1082 lock = self.lock()
1077 1083 try:
1078 1084 tr = self.transaction("commit")
1079 1085 trp = weakref.proxy(tr)
1080 1086
1081 1087 if ctx.files():
1082 1088 m1 = p1.manifest().copy()
1083 1089 m2 = p2.manifest()
1084 1090
1085 1091 # check in files
1086 1092 new = {}
1087 1093 changed = []
1088 1094 linkrev = len(self)
1089 1095 for f in sorted(ctx.modified() + ctx.added()):
1090 1096 self.ui.note(f + "\n")
1091 1097 try:
1092 1098 fctx = ctx[f]
1093 1099 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1094 1100 changed)
1095 1101 m1.set(f, fctx.flags())
1096 1102 except OSError, inst:
1097 1103 self.ui.warn(_("trouble committing %s!\n") % f)
1098 1104 raise
1099 1105 except IOError, inst:
1100 1106 errcode = getattr(inst, 'errno', errno.ENOENT)
1101 1107 if error or errcode and errcode != errno.ENOENT:
1102 1108 self.ui.warn(_("trouble committing %s!\n") % f)
1103 1109 raise
1104 1110 else:
1105 1111 removed.append(f)
1106 1112
1107 1113 # update manifest
1108 1114 m1.update(new)
1109 1115 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1110 1116 drop = [f for f in removed if f in m1]
1111 1117 for f in drop:
1112 1118 del m1[f]
1113 1119 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1114 1120 p2.manifestnode(), (new, drop))
1115 1121 files = changed + removed
1116 1122 else:
1117 1123 mn = p1.manifestnode()
1118 1124 files = []
1119 1125
1120 1126 # update changelog
1121 1127 self.changelog.delayupdate()
1122 1128 n = self.changelog.add(mn, files, ctx.description(),
1123 1129 trp, p1.node(), p2.node(),
1124 1130 user, ctx.date(), ctx.extra().copy())
1125 1131 p = lambda: self.changelog.writepending() and self.root or ""
1126 1132 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1127 1133 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1128 1134 parent2=xp2, pending=p)
1129 1135 self.changelog.finalize(trp)
1130 1136 tr.close()
1131 1137
1132 1138 if self._branchcache:
1133 1139 self.updatebranchcache()
1134 1140 return n
1135 1141 finally:
1136 1142 if tr:
1137 1143 tr.release()
1138 1144 lock.release()
1139 1145
1140 1146 def destroyed(self):
1141 1147 '''Inform the repository that nodes have been destroyed.
1142 1148 Intended for use by strip and rollback, so there's a common
1143 1149 place for anything that has to be done after destroying history.'''
1144 1150 # XXX it might be nice if we could take the list of destroyed
1145 1151 # nodes, but I don't see an easy way for rollback() to do that
1146 1152
1147 1153 # Ensure the persistent tag cache is updated. Doing it now
1148 1154 # means that the tag cache only has to worry about destroyed
1149 1155 # heads immediately after a strip/rollback. That in turn
1150 1156 # guarantees that "cachetip == currenttip" (comparing both rev
1151 1157 # and node) always means no nodes have been added or destroyed.
1152 1158
1153 1159 # XXX this is suboptimal when qrefresh'ing: we strip the current
1154 1160 # head, refresh the tag cache, then immediately add a new head.
1155 1161 # But I think doing it this way is necessary for the "instant
1156 1162 # tag cache retrieval" case to work.
1157 1163 self.invalidatecaches()
1158 1164
1159 1165 def walk(self, match, node=None):
1160 1166 '''
1161 1167 walk recursively through the directory tree or a given
1162 1168 changeset, finding all files matched by the match
1163 1169 function
1164 1170 '''
1165 1171 return self[node].walk(match)
1166 1172
1167 1173 def status(self, node1='.', node2=None, match=None,
1168 1174 ignored=False, clean=False, unknown=False,
1169 1175 listsubrepos=False):
1170 1176 """return status of files between two nodes or node and working directory
1171 1177
1172 1178 If node1 is None, use the first dirstate parent instead.
1173 1179 If node2 is None, compare node1 with working directory.
1174 1180 """
1175 1181
1176 1182 def mfmatches(ctx):
1177 1183 mf = ctx.manifest().copy()
1178 1184 for fn in mf.keys():
1179 1185 if not match(fn):
1180 1186 del mf[fn]
1181 1187 return mf
1182 1188
1183 1189 if isinstance(node1, context.changectx):
1184 1190 ctx1 = node1
1185 1191 else:
1186 1192 ctx1 = self[node1]
1187 1193 if isinstance(node2, context.changectx):
1188 1194 ctx2 = node2
1189 1195 else:
1190 1196 ctx2 = self[node2]
1191 1197
1192 1198 working = ctx2.rev() is None
1193 1199 parentworking = working and ctx1 == self['.']
1194 1200 match = match or matchmod.always(self.root, self.getcwd())
1195 1201 listignored, listclean, listunknown = ignored, clean, unknown
1196 1202
1197 1203 # load earliest manifest first for caching reasons
1198 1204 if not working and ctx2.rev() < ctx1.rev():
1199 1205 ctx2.manifest()
1200 1206
1201 1207 if not parentworking:
1202 1208 def bad(f, msg):
1203 1209 if f not in ctx1:
1204 1210 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1205 1211 match.bad = bad
1206 1212
1207 1213 if working: # we need to scan the working dir
1208 1214 subrepos = []
1209 1215 if '.hgsub' in self.dirstate:
1210 1216 subrepos = ctx2.substate.keys()
1211 1217 s = self.dirstate.status(match, subrepos, listignored,
1212 1218 listclean, listunknown)
1213 1219 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1214 1220
1215 1221 # check for any possibly clean files
1216 1222 if parentworking and cmp:
1217 1223 fixup = []
1218 1224 # do a full compare of any files that might have changed
1219 1225 for f in sorted(cmp):
1220 1226 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1221 1227 or ctx1[f].cmp(ctx2[f])):
1222 1228 modified.append(f)
1223 1229 else:
1224 1230 fixup.append(f)
1225 1231
1226 1232 # update dirstate for files that are actually clean
1227 1233 if fixup:
1228 1234 if listclean:
1229 1235 clean += fixup
1230 1236
1231 1237 try:
1232 1238 # updating the dirstate is optional
1233 1239 # so we don't wait on the lock
1234 1240 wlock = self.wlock(False)
1235 1241 try:
1236 1242 for f in fixup:
1237 1243 self.dirstate.normal(f)
1238 1244 finally:
1239 1245 wlock.release()
1240 1246 except error.LockError:
1241 1247 pass
1242 1248
1243 1249 if not parentworking:
1244 1250 mf1 = mfmatches(ctx1)
1245 1251 if working:
1246 1252 # we are comparing working dir against non-parent
1247 1253 # generate a pseudo-manifest for the working dir
1248 1254 mf2 = mfmatches(self['.'])
1249 1255 for f in cmp + modified + added:
1250 1256 mf2[f] = None
1251 1257 mf2.set(f, ctx2.flags(f))
1252 1258 for f in removed:
1253 1259 if f in mf2:
1254 1260 del mf2[f]
1255 1261 else:
1256 1262 # we are comparing two revisions
1257 1263 deleted, unknown, ignored = [], [], []
1258 1264 mf2 = mfmatches(ctx2)
1259 1265
1260 1266 modified, added, clean = [], [], []
1261 1267 for fn in mf2:
1262 1268 if fn in mf1:
1263 1269 if (fn not in deleted and
1264 1270 (mf1.flags(fn) != mf2.flags(fn) or
1265 1271 (mf1[fn] != mf2[fn] and
1266 1272 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1267 1273 modified.append(fn)
1268 1274 elif listclean:
1269 1275 clean.append(fn)
1270 1276 del mf1[fn]
1271 1277 elif fn not in deleted:
1272 1278 added.append(fn)
1273 1279 removed = mf1.keys()
1274 1280
1275 1281 r = modified, added, removed, deleted, unknown, ignored, clean
1276 1282
1277 1283 if listsubrepos:
1278 1284 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1279 1285 if working:
1280 1286 rev2 = None
1281 1287 else:
1282 1288 rev2 = ctx2.substate[subpath][1]
1283 1289 try:
1284 1290 submatch = matchmod.narrowmatcher(subpath, match)
1285 1291 s = sub.status(rev2, match=submatch, ignored=listignored,
1286 1292 clean=listclean, unknown=listunknown,
1287 1293 listsubrepos=True)
1288 1294 for rfiles, sfiles in zip(r, s):
1289 1295 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1290 1296 except error.LookupError:
1291 1297 self.ui.status(_("skipping missing subrepository: %s\n")
1292 1298 % subpath)
1293 1299
1294 1300 for l in r:
1295 1301 l.sort()
1296 1302 return r
1297 1303
1298 1304 def heads(self, start=None):
1299 1305 heads = self.changelog.heads(start)
1300 1306 # sort the output in rev descending order
1301 1307 return sorted(heads, key=self.changelog.rev, reverse=True)
1302 1308
1303 1309 def branchheads(self, branch=None, start=None, closed=False):
1304 1310 '''return a (possibly filtered) list of heads for the given branch
1305 1311
1306 1312 Heads are returned in topological order, from newest to oldest.
1307 1313 If branch is None, use the dirstate branch.
1308 1314 If start is not None, return only heads reachable from start.
1309 1315 If closed is True, return heads that are marked as closed as well.
1310 1316 '''
1311 1317 if branch is None:
1312 1318 branch = self[None].branch()
1313 1319 branches = self.branchmap()
1314 1320 if branch not in branches:
1315 1321 return []
1316 1322 # the cache returns heads ordered lowest to highest
1317 1323 bheads = list(reversed(branches[branch]))
1318 1324 if start is not None:
1319 1325 # filter out the heads that cannot be reached from startrev
1320 1326 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1321 1327 bheads = [h for h in bheads if h in fbheads]
1322 1328 if not closed:
1323 1329 bheads = [h for h in bheads if
1324 1330 ('close' not in self.changelog.read(h)[5])]
1325 1331 return bheads
1326 1332
1327 1333 def branches(self, nodes):
1328 1334 if not nodes:
1329 1335 nodes = [self.changelog.tip()]
1330 1336 b = []
1331 1337 for n in nodes:
1332 1338 t = n
1333 1339 while True:
1334 1340 p = self.changelog.parents(n)
1335 1341 if p[1] != nullid or p[0] == nullid:
1336 1342 b.append((t, n, p[0], p[1]))
1337 1343 break
1338 1344 n = p[0]
1339 1345 return b
1340 1346
1341 1347 def between(self, pairs):
1342 1348 r = []
1343 1349
1344 1350 for top, bottom in pairs:
1345 1351 n, l, i = top, [], 0
1346 1352 f = 1
1347 1353
1348 1354 while n != bottom and n != nullid:
1349 1355 p = self.changelog.parents(n)[0]
1350 1356 if i == f:
1351 1357 l.append(n)
1352 1358 f = f * 2
1353 1359 n = p
1354 1360 i += 1
1355 1361
1356 1362 r.append(l)
1357 1363
1358 1364 return r
1359 1365
1360 1366 def pull(self, remote, heads=None, force=False):
1361 1367 lock = self.lock()
1362 1368 try:
1363 1369 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1364 1370 force=force)
1365 1371 common, fetch, rheads = tmp
1366 1372 if not fetch:
1367 1373 self.ui.status(_("no changes found\n"))
1368 1374 result = 0
1369 1375 else:
1370 1376 if heads is None and list(common) == [nullid]:
1371 1377 self.ui.status(_("requesting all changes\n"))
1372 1378 elif heads is None and remote.capable('changegroupsubset'):
1373 1379 # issue1320, avoid a race if remote changed after discovery
1374 1380 heads = rheads
1375 1381
1376 1382 if remote.capable('getbundle'):
1377 1383 cg = remote.getbundle('pull', common=common,
1378 1384 heads=heads or rheads)
1379 1385 elif heads is None:
1380 1386 cg = remote.changegroup(fetch, 'pull')
1381 1387 elif not remote.capable('changegroupsubset'):
1382 1388 raise util.Abort(_("partial pull cannot be done because "
1383 1389 "other repository doesn't support "
1384 1390 "changegroupsubset."))
1385 1391 else:
1386 1392 cg = remote.changegroupsubset(fetch, heads, 'pull')
1387 1393 result = self.addchangegroup(cg, 'pull', remote.url(),
1388 1394 lock=lock)
1389 1395 finally:
1390 1396 lock.release()
1391 1397
1392 1398 return result
1393 1399
1394 1400 def checkpush(self, force, revs):
1395 1401 """Extensions can override this function if additional checks have
1396 1402 to be performed before pushing, or call it if they override push
1397 1403 command.
1398 1404 """
1399 1405 pass
1400 1406
1401 1407 def push(self, remote, force=False, revs=None, newbranch=False):
1402 1408 '''Push outgoing changesets (limited by revs) from the current
1403 1409 repository to remote. Return an integer:
1404 1410 - 0 means HTTP error *or* nothing to push
1405 1411 - 1 means we pushed and remote head count is unchanged *or*
1406 1412 we have outgoing changesets but refused to push
1407 1413 - other values as described by addchangegroup()
1408 1414 '''
1409 1415 # there are two ways to push to remote repo:
1410 1416 #
1411 1417 # addchangegroup assumes local user can lock remote
1412 1418 # repo (local filesystem, old ssh servers).
1413 1419 #
1414 1420 # unbundle assumes local user cannot lock remote repo (new ssh
1415 1421 # servers, http servers).
1416 1422
1417 1423 self.checkpush(force, revs)
1418 1424 lock = None
1419 1425 unbundle = remote.capable('unbundle')
1420 1426 if not unbundle:
1421 1427 lock = remote.lock()
1422 1428 try:
1423 1429 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1424 1430 newbranch)
1425 1431 ret = remote_heads
1426 1432 if cg is not None:
1427 1433 if unbundle:
1428 1434 # local repo finds heads on server, finds out what
1429 1435 # revs it must push. once revs transferred, if server
1430 1436 # finds it has different heads (someone else won
1431 1437 # commit/push race), server aborts.
1432 1438 if force:
1433 1439 remote_heads = ['force']
1434 1440 # ssh: return remote's addchangegroup()
1435 1441 # http: return remote's addchangegroup() or 0 for error
1436 1442 ret = remote.unbundle(cg, remote_heads, 'push')
1437 1443 else:
1438 1444 # we return an integer indicating remote head count change
1439 1445 ret = remote.addchangegroup(cg, 'push', self.url(),
1440 1446 lock=lock)
1441 1447 finally:
1442 1448 if lock is not None:
1443 1449 lock.release()
1444 1450
1445 1451 self.ui.debug("checking for updated bookmarks\n")
1446 1452 rb = remote.listkeys('bookmarks')
1447 1453 for k in rb.keys():
1448 1454 if k in self._bookmarks:
1449 1455 nr, nl = rb[k], hex(self._bookmarks[k])
1450 1456 if nr in self:
1451 1457 cr = self[nr]
1452 1458 cl = self[nl]
1453 1459 if cl in cr.descendants():
1454 1460 r = remote.pushkey('bookmarks', k, nr, nl)
1455 1461 if r:
1456 1462 self.ui.status(_("updating bookmark %s\n") % k)
1457 1463 else:
1458 1464 self.ui.warn(_('updating bookmark %s'
1459 1465 ' failed!\n') % k)
1460 1466
1461 1467 return ret
1462 1468
1463 1469 def changegroupinfo(self, nodes, source):
1464 1470 if self.ui.verbose or source == 'bundle':
1465 1471 self.ui.status(_("%d changesets found\n") % len(nodes))
1466 1472 if self.ui.debugflag:
1467 1473 self.ui.debug("list of changesets:\n")
1468 1474 for node in nodes:
1469 1475 self.ui.debug("%s\n" % hex(node))
1470 1476
1471 1477 def changegroupsubset(self, bases, heads, source):
1472 1478 """Compute a changegroup consisting of all the nodes that are
1473 1479 descendants of any of the bases and ancestors of any of the heads.
1474 1480 Return a chunkbuffer object whose read() method will return
1475 1481 successive changegroup chunks.
1476 1482
1477 1483 It is fairly complex as determining which filenodes and which
1478 1484 manifest nodes need to be included for the changeset to be complete
1479 1485 is non-trivial.
1480 1486
1481 1487 Another wrinkle is doing the reverse, figuring out which changeset in
1482 1488 the changegroup a particular filenode or manifestnode belongs to.
1483 1489 """
1484 1490 cl = self.changelog
1485 1491 if not bases:
1486 1492 bases = [nullid]
1487 1493 csets, bases, heads = cl.nodesbetween(bases, heads)
1488 1494 # We assume that all ancestors of bases are known
1489 1495 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1490 1496 return self._changegroupsubset(common, csets, heads, source)
1491 1497
1492 1498 def getbundle(self, source, heads=None, common=None):
1493 1499 """Like changegroupsubset, but returns the set difference between the
1494 1500 ancestors of heads and the ancestors common.
1495 1501
1496 1502 If heads is None, use the local heads. If common is None, use [nullid].
1497 1503
1498 1504 The nodes in common might not all be known locally due to the way the
1499 1505 current discovery protocol works.
1500 1506 """
1501 1507 cl = self.changelog
1502 1508 if common:
1503 1509 nm = cl.nodemap
1504 1510 common = [n for n in common if n in nm]
1505 1511 else:
1506 1512 common = [nullid]
1507 1513 if not heads:
1508 1514 heads = cl.heads()
1509 1515 common, missing = cl.findcommonmissing(common, heads)
1510 1516 if not missing:
1511 1517 return None
1512 1518 return self._changegroupsubset(common, missing, heads, source)
1513 1519
1514 1520 def _changegroupsubset(self, commonrevs, csets, heads, source):
1515 1521
1516 1522 cl = self.changelog
1517 1523 mf = self.manifest
1518 1524 mfs = {} # needed manifests
1519 1525 fnodes = {} # needed file nodes
1520 1526 changedfiles = set()
1521 1527 fstate = ['', {}]
1522 1528 count = [0]
1523 1529
1524 1530 # can we go through the fast path ?
1525 1531 heads.sort()
1526 1532 if heads == sorted(self.heads()):
1527 1533 return self._changegroup(csets, source)
1528 1534
1529 1535 # slow path
1530 1536 self.hook('preoutgoing', throw=True, source=source)
1531 1537 self.changegroupinfo(csets, source)
1532 1538
1533 1539 # filter any nodes that claim to be part of the known set
1534 1540 def prune(revlog, missing):
1535 1541 return [n for n in missing
1536 1542 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1537 1543
1538 1544 def lookup(revlog, x):
1539 1545 if revlog == cl:
1540 1546 c = cl.read(x)
1541 1547 changedfiles.update(c[3])
1542 1548 mfs.setdefault(c[0], x)
1543 1549 count[0] += 1
1544 1550 self.ui.progress(_('bundling'), count[0],
1545 1551 unit=_('changesets'), total=len(csets))
1546 1552 return x
1547 1553 elif revlog == mf:
1548 1554 clnode = mfs[x]
1549 1555 mdata = mf.readfast(x)
1550 1556 for f in changedfiles:
1551 1557 if f in mdata:
1552 1558 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1553 1559 count[0] += 1
1554 1560 self.ui.progress(_('bundling'), count[0],
1555 1561 unit=_('manifests'), total=len(mfs))
1556 1562 return mfs[x]
1557 1563 else:
1558 1564 self.ui.progress(
1559 1565 _('bundling'), count[0], item=fstate[0],
1560 1566 unit=_('files'), total=len(changedfiles))
1561 1567 return fstate[1][x]
1562 1568
1563 1569 bundler = changegroup.bundle10(lookup)
1564 1570 reorder = self.ui.config('bundle', 'reorder', 'auto')
1565 1571 if reorder == 'auto':
1566 1572 reorder = None
1567 1573 else:
1568 1574 reorder = util.parsebool(reorder)
1569 1575
1570 1576 def gengroup():
1571 1577 # Create a changenode group generator that will call our functions
1572 1578 # back to lookup the owning changenode and collect information.
1573 1579 for chunk in cl.group(csets, bundler, reorder=reorder):
1574 1580 yield chunk
1575 1581 self.ui.progress(_('bundling'), None)
1576 1582
1577 1583 # Create a generator for the manifestnodes that calls our lookup
1578 1584 # and data collection functions back.
1579 1585 count[0] = 0
1580 1586 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1581 1587 yield chunk
1582 1588 self.ui.progress(_('bundling'), None)
1583 1589
1584 1590 mfs.clear()
1585 1591
1586 1592 # Go through all our files in order sorted by name.
1587 1593 count[0] = 0
1588 1594 for fname in sorted(changedfiles):
1589 1595 filerevlog = self.file(fname)
1590 1596 if not len(filerevlog):
1591 1597 raise util.Abort(_("empty or missing revlog for %s") % fname)
1592 1598 fstate[0] = fname
1593 1599 fstate[1] = fnodes.pop(fname, {})
1594 1600
1595 1601 nodelist = prune(filerevlog, fstate[1])
1596 1602 if nodelist:
1597 1603 count[0] += 1
1598 1604 yield bundler.fileheader(fname)
1599 1605 for chunk in filerevlog.group(nodelist, bundler, reorder):
1600 1606 yield chunk
1601 1607
1602 1608 # Signal that no more groups are left.
1603 1609 yield bundler.close()
1604 1610 self.ui.progress(_('bundling'), None)
1605 1611
1606 1612 if csets:
1607 1613 self.hook('outgoing', node=hex(csets[0]), source=source)
1608 1614
1609 1615 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1610 1616
1611 1617 def changegroup(self, basenodes, source):
1612 1618 # to avoid a race we use changegroupsubset() (issue1320)
1613 1619 return self.changegroupsubset(basenodes, self.heads(), source)
1614 1620
1615 1621 def _changegroup(self, nodes, source):
1616 1622 """Compute the changegroup of all nodes that we have that a recipient
1617 1623 doesn't. Return a chunkbuffer object whose read() method will return
1618 1624 successive changegroup chunks.
1619 1625
1620 1626 This is much easier than the previous function as we can assume that
1621 1627 the recipient has any changenode we aren't sending them.
1622 1628
1623 1629 nodes is the set of nodes to send"""
1624 1630
1625 1631 cl = self.changelog
1626 1632 mf = self.manifest
1627 1633 mfs = {}
1628 1634 changedfiles = set()
1629 1635 fstate = ['']
1630 1636 count = [0]
1631 1637
1632 1638 self.hook('preoutgoing', throw=True, source=source)
1633 1639 self.changegroupinfo(nodes, source)
1634 1640
1635 1641 revset = set([cl.rev(n) for n in nodes])
1636 1642
1637 1643 def gennodelst(log):
1638 1644 return [log.node(r) for r in log if log.linkrev(r) in revset]
1639 1645
1640 1646 def lookup(revlog, x):
1641 1647 if revlog == cl:
1642 1648 c = cl.read(x)
1643 1649 changedfiles.update(c[3])
1644 1650 mfs.setdefault(c[0], x)
1645 1651 count[0] += 1
1646 1652 self.ui.progress(_('bundling'), count[0],
1647 1653 unit=_('changesets'), total=len(nodes))
1648 1654 return x
1649 1655 elif revlog == mf:
1650 1656 count[0] += 1
1651 1657 self.ui.progress(_('bundling'), count[0],
1652 1658 unit=_('manifests'), total=len(mfs))
1653 1659 return cl.node(revlog.linkrev(revlog.rev(x)))
1654 1660 else:
1655 1661 self.ui.progress(
1656 1662 _('bundling'), count[0], item=fstate[0],
1657 1663 total=len(changedfiles), unit=_('files'))
1658 1664 return cl.node(revlog.linkrev(revlog.rev(x)))
1659 1665
1660 1666 bundler = changegroup.bundle10(lookup)
1661 1667 reorder = self.ui.config('bundle', 'reorder', 'auto')
1662 1668 if reorder == 'auto':
1663 1669 reorder = None
1664 1670 else:
1665 1671 reorder = util.parsebool(reorder)
1666 1672
1667 1673 def gengroup():
1668 1674 '''yield a sequence of changegroup chunks (strings)'''
1669 1675 # construct a list of all changed files
1670 1676
1671 1677 for chunk in cl.group(nodes, bundler, reorder=reorder):
1672 1678 yield chunk
1673 1679 self.ui.progress(_('bundling'), None)
1674 1680
1675 1681 count[0] = 0
1676 1682 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1677 1683 yield chunk
1678 1684 self.ui.progress(_('bundling'), None)
1679 1685
1680 1686 count[0] = 0
1681 1687 for fname in sorted(changedfiles):
1682 1688 filerevlog = self.file(fname)
1683 1689 if not len(filerevlog):
1684 1690 raise util.Abort(_("empty or missing revlog for %s") % fname)
1685 1691 fstate[0] = fname
1686 1692 nodelist = gennodelst(filerevlog)
1687 1693 if nodelist:
1688 1694 count[0] += 1
1689 1695 yield bundler.fileheader(fname)
1690 1696 for chunk in filerevlog.group(nodelist, bundler, reorder):
1691 1697 yield chunk
1692 1698 yield bundler.close()
1693 1699 self.ui.progress(_('bundling'), None)
1694 1700
1695 1701 if nodes:
1696 1702 self.hook('outgoing', node=hex(nodes[0]), source=source)
1697 1703
1698 1704 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1699 1705
1700 1706 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1701 1707 """Add the changegroup returned by source.read() to this repo.
1702 1708 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1703 1709 the URL of the repo where this changegroup is coming from.
1704 1710 If lock is not None, the function takes ownership of the lock
1705 1711 and releases it after the changegroup is added.
1706 1712
1707 1713 Return an integer summarizing the change to this repo:
1708 1714 - nothing changed or no source: 0
1709 1715 - more heads than before: 1+added heads (2..n)
1710 1716 - fewer heads than before: -1-removed heads (-2..-n)
1711 1717 - number of heads stays the same: 1
1712 1718 """
1713 1719 def csmap(x):
1714 1720 self.ui.debug("add changeset %s\n" % short(x))
1715 1721 return len(cl)
1716 1722
1717 1723 def revmap(x):
1718 1724 return cl.rev(x)
1719 1725
1720 1726 if not source:
1721 1727 return 0
1722 1728
1723 1729 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1724 1730
1725 1731 changesets = files = revisions = 0
1726 1732 efiles = set()
1727 1733
1728 1734 # write changelog data to temp files so concurrent readers will not see
1729 1735 # an inconsistent view
1730 1736 cl = self.changelog
1731 1737 cl.delayupdate()
1732 1738 oldheads = cl.heads()
1733 1739
1734 1740 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1735 1741 try:
1736 1742 trp = weakref.proxy(tr)
1737 1743 # pull off the changeset group
1738 1744 self.ui.status(_("adding changesets\n"))
1739 1745 clstart = len(cl)
1740 1746 class prog(object):
1741 1747 step = _('changesets')
1742 1748 count = 1
1743 1749 ui = self.ui
1744 1750 total = None
1745 1751 def __call__(self):
1746 1752 self.ui.progress(self.step, self.count, unit=_('chunks'),
1747 1753 total=self.total)
1748 1754 self.count += 1
1749 1755 pr = prog()
1750 1756 source.callback = pr
1751 1757
1752 1758 source.changelogheader()
1753 1759 if (cl.addgroup(source, csmap, trp) is None
1754 1760 and not emptyok):
1755 1761 raise util.Abort(_("received changelog group is empty"))
1756 1762 clend = len(cl)
1757 1763 changesets = clend - clstart
1758 1764 for c in xrange(clstart, clend):
1759 1765 efiles.update(self[c].files())
1760 1766 efiles = len(efiles)
1761 1767 self.ui.progress(_('changesets'), None)
1762 1768
1763 1769 # pull off the manifest group
1764 1770 self.ui.status(_("adding manifests\n"))
1765 1771 pr.step = _('manifests')
1766 1772 pr.count = 1
1767 1773 pr.total = changesets # manifests <= changesets
1768 1774 # no need to check for empty manifest group here:
1769 1775 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1770 1776 # no new manifest will be created and the manifest group will
1771 1777 # be empty during the pull
1772 1778 source.manifestheader()
1773 1779 self.manifest.addgroup(source, revmap, trp)
1774 1780 self.ui.progress(_('manifests'), None)
1775 1781
1776 1782 needfiles = {}
1777 1783 if self.ui.configbool('server', 'validate', default=False):
1778 1784 # validate incoming csets have their manifests
1779 1785 for cset in xrange(clstart, clend):
1780 1786 mfest = self.changelog.read(self.changelog.node(cset))[0]
1781 1787 mfest = self.manifest.readdelta(mfest)
1782 1788 # store file nodes we must see
1783 1789 for f, n in mfest.iteritems():
1784 1790 needfiles.setdefault(f, set()).add(n)
1785 1791
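# (illustrative) the manifest validation above is opt-in; a server
# enables it with:
#
#   [server]
#   validate = True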
1786 1792 # process the files
1787 1793 self.ui.status(_("adding file changes\n"))
1788 1794 pr.step = _('files')
1789 1795 pr.count = 1
1790 1796 pr.total = efiles
1791 1797 source.callback = None
1792 1798
1793 1799 while True:
1794 1800 chunkdata = source.filelogheader()
1795 1801 if not chunkdata:
1796 1802 break
1797 1803 f = chunkdata["filename"]
1798 1804 self.ui.debug("adding %s revisions\n" % f)
1799 1805 pr()
1800 1806 fl = self.file(f)
1801 1807 o = len(fl)
1802 1808 if fl.addgroup(source, revmap, trp) is None:
1803 1809 raise util.Abort(_("received file revlog group is empty"))
1804 1810 revisions += len(fl) - o
1805 1811 files += 1
1806 1812 if f in needfiles:
1807 1813 needs = needfiles[f]
1808 1814 for new in xrange(o, len(fl)):
1809 1815 n = fl.node(new)
1810 1816 if n in needs:
1811 1817 needs.remove(n)
1812 1818 if not needs:
1813 1819 del needfiles[f]
1814 1820 self.ui.progress(_('files'), None)
1815 1821
1816 1822 for f, needs in needfiles.iteritems():
1817 1823 fl = self.file(f)
1818 1824 for n in needs:
1819 1825 try:
1820 1826 fl.rev(n)
1821 1827 except error.LookupError:
1822 1828 raise util.Abort(
1823 1829 _('missing file data for %s:%s - run hg verify') %
1824 1830 (f, hex(n)))
1825 1831
1826 1832 dh = 0
1827 1833 if oldheads:
1828 1834 heads = cl.heads()
1829 1835 dh = len(heads) - len(oldheads)
1830 1836 for h in heads:
1831 1837 if h not in oldheads and 'close' in self[h].extra():
1832 1838 dh -= 1
1833 1839 htext = ""
1834 1840 if dh:
1835 1841 htext = _(" (%+d heads)") % dh
1836 1842
1837 1843 self.ui.status(_("added %d changesets"
1838 1844 " with %d changes to %d files%s\n")
1839 1845 % (changesets, revisions, files, htext))
1840 1846
1841 1847 if changesets > 0:
1842 1848 p = lambda: cl.writepending() and self.root or ""
1843 1849 self.hook('pretxnchangegroup', throw=True,
1844 1850 node=hex(cl.node(clstart)), source=srctype,
1845 1851 url=url, pending=p)
1846 1852
1847 1853 # make changelog see real files again
1848 1854 cl.finalize(trp)
1849 1855
1850 1856 tr.close()
1851 1857 finally:
1852 1858 tr.release()
1853 1859 if lock:
1854 1860 lock.release()
1855 1861
1856 1862 if changesets > 0:
1857 1863 # forcefully update the on-disk branch cache
1858 1864 self.ui.debug("updating the branch cache\n")
1859 1865 self.updatebranchcache()
1860 1866 self.hook("changegroup", node=hex(cl.node(clstart)),
1861 1867 source=srctype, url=url)
1862 1868
1863 1869 for i in xrange(clstart, clend):
1864 1870 self.hook("incoming", node=hex(cl.node(i)),
1865 1871 source=srctype, url=url)
1866 1872
1867 1873 # never return 0 here:
1868 1874 if dh < 0:
1869 1875 return dh - 1
1870 1876 else:
1871 1877 return dh + 1
1872 1878
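# Illustrative reading of addchangegroup's return value (the call and
# names below are assumed, not part of this changeset):
#
#   ret = repo.addchangegroup(source, 'pull', url)
#   if ret == 0:
#       pass                   # nothing changed, or no source
#   elif ret > 1:
#       added = ret - 1        # e.g. 2 heads growing to 4 gives ret == 3
#   elif ret < 0:
#       removed = -ret - 1     # e.g. 3 heads merging to 1 gives ret == -3
#   # ret == 1: changesets were added but the head count is unchanged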
1873 1879 def stream_in(self, remote, requirements):
1874 1880 lock = self.lock()
1875 1881 try:
1876 1882 fp = remote.stream_out()
1877 1883 l = fp.readline()
1878 1884 try:
1879 1885 resp = int(l)
1880 1886 except ValueError:
1881 1887 raise error.ResponseError(
1882 1888 _('Unexpected response from remote server:'), l)
1883 1889 if resp == 1:
1884 1890 raise util.Abort(_('operation forbidden by server'))
1885 1891 elif resp == 2:
1886 1892 raise util.Abort(_('locking the remote repository failed'))
1887 1893 elif resp != 0:
1888 1894 raise util.Abort(_('the server sent an unknown error code'))
1889 1895 self.ui.status(_('streaming all changes\n'))
1890 1896 l = fp.readline()
1891 1897 try:
1892 1898 total_files, total_bytes = map(int, l.split(' ', 1))
1893 1899 except (ValueError, TypeError):
1894 1900 raise error.ResponseError(
1895 1901 _('Unexpected response from remote server:'), l)
1896 1902 self.ui.status(_('%d files to transfer, %s of data\n') %
1897 1903 (total_files, util.bytecount(total_bytes)))
1898 1904 start = time.time()
1899 1905 for i in xrange(total_files):
1900 1906 # XXX doesn't support '\n' or '\r' in filenames
1901 1907 l = fp.readline()
1902 1908 try:
1903 1909 name, size = l.split('\0', 1)
1904 1910 size = int(size)
1905 1911 except (ValueError, TypeError):
1906 1912 raise error.ResponseError(
1907 1913 _('Unexpected response from remote server:'), l)
1908 1914 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1909 1915 # for backwards compat, name was partially encoded
1910 1916 ofp = self.sopener(store.decodedir(name), 'w')
1911 1917 for chunk in util.filechunkiter(fp, limit=size):
1912 1918 ofp.write(chunk)
1913 1919 ofp.close()
1914 1920 elapsed = time.time() - start
1915 1921 if elapsed <= 0:
1916 1922 elapsed = 0.001
1917 1923 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1918 1924 (util.bytecount(total_bytes), elapsed,
1919 1925 util.bytecount(total_bytes / elapsed)))
1920 1926
1921 1927 # new requirements = old non-format requirements + new format-related
1922 1928 # requirements from the streamed-in repository
1923 1929 requirements.update(set(self.requirements) - self.supportedformats)
1924 1930 self._applyrequirements(requirements)
1925 1931 self._writerequirements()
1926 1932
1927 1933 self.invalidate()
1928 1934 return len(self.heads()) + 1
1929 1935 finally:
1930 1936 lock.release()
1931 1937
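# For reference, a sketch of the stream_out wire format that stream_in
# parses, reconstructed from the code above (informal, line-oriented):
#
#   <status>\n                      0 ok, 1 forbidden, 2 remote lock failed
#   <total_files> <total_bytes>\n
#   then, repeated total_files times:
#   <store path>\0<size>\n
#   <size bytes of raw file data>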
1932 1938 def clone(self, remote, heads=[], stream=False):
1933 1939 '''clone remote repository.
1934 1940
1935 1941 keyword arguments:
1936 1942 heads: list of revs to clone (forces use of pull)
1937 1943 stream: use streaming clone if possible'''
1938 1944
1939 1945 # now, all clients that can request uncompressed clones can
1940 1946 # read repo formats supported by all servers that can serve
1941 1947 # them.
1942 1948
1943 1949 # if revlog format changes, client will have to check version
1944 1950 # and format flags on "stream" capability, and use
1945 1951 # uncompressed only if compatible.
1946 1952
1947 1953 if stream and not heads:
1948 1954 # 'stream' means remote revlog format is revlogv1 only
1949 1955 if remote.capable('stream'):
1950 1956 return self.stream_in(remote, set(('revlogv1',)))
1951 1957 # otherwise, 'streamreqs' contains the remote revlog format
1952 1958 streamreqs = remote.capable('streamreqs')
1953 1959 if streamreqs:
1954 1960 streamreqs = set(streamreqs.split(','))
1955 1961 # if we support it, stream in and adjust our requirements
1956 1962 if not streamreqs - self.supportedformats:
1957 1963 return self.stream_in(remote, streamreqs)
1958 1964 return self.pull(remote, heads)
1959 1965
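# Illustrative capability negotiation (advertised value assumed): a remote
# whose 'streamreqs' capability is 'generaldelta,revlogv1' yields
#
#   streamreqs = set(['generaldelta', 'revlogv1'])
#
# which leaves nothing outside supportedformats, so stream_in is used; a
# remote requiring a format we do not support falls back to pull().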
1960 1966 def pushkey(self, namespace, key, old, new):
1961 1967 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1962 1968 old=old, new=new)
1963 1969 ret = pushkey.push(self, namespace, key, old, new)
1964 1970 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1965 1971 ret=ret)
1966 1972 return ret
1967 1973
1968 1974 def listkeys(self, namespace):
1969 1975 self.hook('prelistkeys', throw=True, namespace=namespace)
1970 1976 values = pushkey.list(self, namespace)
1971 1977 self.hook('listkeys', namespace=namespace, values=values)
1972 1978 return values
1973 1979
1974 1980 def debugwireargs(self, one, two, three=None, four=None, five=None):
1975 1981 '''used to test argument passing over the wire'''
1976 1982 return "%s %s %s %s %s" % (one, two, three, four, five)
1977 1983
1978 1984 def savecommitmessage(self, text):
1979 1985 fp = self.opener('last-message.txt', 'wb')
1980 1986 try:
1981 1987 fp.write(text)
1982 1988 finally:
1983 1989 fp.close()
1984 1990 return self.pathto(fp.name[len(self.root)+1:])
1985 1991
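# Illustrative use of savecommitmessage (the call site is assumed):
#
#   relpath = repo.savecommitmessage('my commit text')
#
# leaves the text in .hg/last-message.txt and returns its path relative
# to the current working directory.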
1986 1992 # used to avoid circular references so destructors work
1987 1993 def aftertrans(files):
1988 1994 renamefiles = [tuple(t) for t in files]
1989 1995 def a():
1990 1996 for src, dest in renamefiles:
1991 1997 util.rename(src, dest)
1992 1998 return a
1993 1999
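# Illustrative use of aftertrans (paths assumed): transaction code can
# register the returned callback so journal files are renamed only once
# the transaction is safely closed:
#
#   onclose = aftertrans([('journal', 'undo')])
#   onclose()    # renames 'journal' to 'undo' via util.rename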
1994 2000 def undoname(fn):
1995 2001 base, name = os.path.split(fn)
1996 2002 assert name.startswith('journal')
1997 2003 return os.path.join(base, name.replace('journal', 'undo', 1))
1998 2004
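# For example, undoname('/repo/.hg/store/journal') returns
# '/repo/.hg/store/undo'; only the leading 'journal' of the basename is
# replaced.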
1999 2005 def instance(ui, path, create):
2000 2006 return localrepository(ui, util.urllocalpath(path), create)
2001 2007
2002 2008 def islocal(path):
2003 2009 return True
@@ -1,141 +1,142 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from i18n import _
11 11 import changelog, byterange, url, error
12 12 import localrepo, manifest, util, scmutil, store
13 13 import urllib, urllib2, errno
14 14
15 15 class httprangereader(object):
16 16 def __init__(self, url, opener):
17 17 # we assume opener has HTTPRangeHandler
18 18 self.url = url
19 19 self.pos = 0
20 20 self.opener = opener
21 21 self.name = url
22 22 def seek(self, pos):
23 23 self.pos = pos
24 24 def read(self, bytes=None):
25 25 req = urllib2.Request(self.url)
26 26 end = ''
27 27 if bytes:
28 28 end = self.pos + bytes - 1
29 29 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
30 30
31 31 try:
32 32 f = self.opener.open(req)
33 33 data = f.read()
34 34 if hasattr(f, 'getcode'):
35 35 # python 2.6+
36 36 code = f.getcode()
37 37 elif hasattr(f, 'code'):
38 38 # undocumented attribute, seems to be set in 2.4 and 2.5
39 39 code = f.code
40 40 else:
41 41 # Don't know how to check, hope for the best.
42 42 code = 206
43 43 except urllib2.HTTPError, inst:
44 44 num = inst.code == 404 and errno.ENOENT or None
45 45 raise IOError(num, inst)
46 46 except urllib2.URLError, inst:
47 47 raise IOError(None, inst.reason[1])
48 48
49 49 if code == 200:
50 50 # HTTPRangeHandler does nothing if remote does not support
51 51 # Range headers and returns the full entity. Let's slice it.
52 52 if bytes:
53 53 data = data[self.pos:self.pos + bytes]
54 54 else:
55 55 data = data[self.pos:]
56 56 elif bytes:
57 57 data = data[:bytes]
58 58 self.pos += len(data)
59 59 return data
60 60 def __iter__(self):
61 61 return iter(self.read().splitlines(1))
62 62 def close(self):
63 63 pass
64 64
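# Illustrative behaviour of httprangereader (header per the HTTP/1.1 Range
# spec): a 10-byte read() at offset 100 sends
#
#   Range: bytes=100-109
#
# A 206 response carries exactly that slice; a 200 response means the
# server ignored the header and returned the whole entity, which read()
# then slices locally.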
65 65 def build_opener(ui, authinfo):
66 66 # urllib cannot handle URLs with embedded user or passwd
67 67 urlopener = url.opener(ui, authinfo)
68 68 urlopener.add_handler(byterange.HTTPRangeHandler())
69 69
70 70 class statichttpopener(scmutil.abstractopener):
71 71 def __init__(self, base):
72 72 self.base = base
73 73
74 74 def __call__(self, path, mode="r", atomictemp=None):
75 75 if mode not in ('r', 'rb'):
76 76 raise IOError('Permission denied')
77 77 f = "/".join((self.base, urllib.quote(path)))
78 78 return httprangereader(f, urlopener)
79 79
80 80 return statichttpopener
81 81
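# Illustrative behaviour of the opener built above (base URL assumed):
#
#   opener = build_opener(ui, authinfo)('http://host/repo/.hg')
#   opener('store/00changelog.i')        # read-only, range-capable access
#   opener('store/00changelog.i', 'w')   # raises IOError('Permission denied')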
82 82 class statichttprepository(localrepo.localrepository):
83 83 def __init__(self, ui, path):
84 84 self._url = path
85 85 self.ui = ui
86 86
87 87 self.root = path
88 88 u = util.url(path.rstrip('/') + "/.hg")
89 89 self.path, authinfo = u.authinfo()
90 90
91 91 opener = build_opener(ui, authinfo)
92 92 self.opener = opener(self.path)
93 93
94 94 try:
95 95 requirements = scmutil.readrequires(self.opener, self.supported)
96 96 except IOError, inst:
97 97 if inst.errno != errno.ENOENT:
98 98 raise
99 99 requirements = set()
100 100
101 101 # check if it is a non-empty old-style repository
102 102 try:
103 103 fp = self.opener("00changelog.i")
104 104 fp.read(1)
105 105 fp.close()
106 106 except IOError, inst:
107 107 if inst.errno != errno.ENOENT:
108 108 raise
109 109 # we do not care about empty old-style repositories here
110 110 msg = _("'%s' does not appear to be an hg repository") % path
111 111 raise error.RepoError(msg)
112 112
113 113 # setup store
114 114 self.store = store.store(requirements, self.path, opener)
115 115 self.spath = self.store.path
116 116 self.sopener = self.store.opener
117 117 self.sjoin = self.store.join
118 118
119 119 self.manifest = manifest.manifest(self.sopener)
120 120 self.changelog = changelog.changelog(self.sopener)
121 121 self._tags = None
122 122 self.nodetagscache = None
123 123 self._branchcache = None
124 124 self._branchcachetip = None
125 125 self.encodepats = None
126 126 self.decodepats = None
127 127 self.capabilities.difference_update(["pushkey"])
128 self._filecache = {}
128 129
129 130 def url(self):
130 131 return self._url
131 132
132 133 def local(self):
133 134 return False
134 135
135 136 def lock(self, wait=True):
136 137 raise util.Abort(_('cannot lock static-http repository'))
137 138
138 139 def instance(ui, path, create):
139 140 if create:
140 141 raise util.Abort(_('cannot create new static-http repository'))
141 142 return statichttprepository(ui, path[7:])