fix calculation of new heads added during push with -r...
Benoit Boissinot
r3923:27230c29 0.9.3 default
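The one-line change in prepush() below passes the outgoing heads as a second argument to changelog.heads(), so that when pushing with -r, local heads outside the outgoing subset no longer make the push look like it creates new remote heads. A minimal sketch of the idea, using a toy graph and a made-up heads() helper rather than Mercurial's real revlog API:

    # Toy model only: 'children' and this heads() helper are illustrative
    # assumptions, not Mercurial's revlog.  Graph: r -> a -> b, the remote
    # has r, and we push -r a (so the outgoing subset's heads are ['a']).
    children = {'r': ['a'], 'a': ['b'], 'b': []}

    def heads(start, stop=None):
        # Nodes listed in 'stop' are treated as childless, mirroring the
        # new heads(r, heads) call in prepush.
        stop = set(stop or [])
        found, visit = [], [start]
        while visit:
            n = visit.pop()
            kids = [] if n in stop else children[n]
            if not kids:
                found.append(n)
            visit.extend(kids)
        return found

    pushed = ['a']
    assert heads('r') == ['b']          # old call: 'a' is hidden behind 'b'
    assert heads('r', pushed) == ['a']  # new call: 'a' covers r

With the old form, l = [h for h in heads if h in desc] comes out empty for r, so r stays in newheads and the "push creates new remote branches!" abort fires even for a fast-forward push of a.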
@@ -1,1971 +1,1971 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19 supported = ('revlogv1', 'store')
20 20
21 21 def __del__(self):
22 22 self.transhandle = None
23 23 def __init__(self, parentui, path=None, create=0):
24 24 repo.repository.__init__(self)
25 25 if not path:
26 26 p = os.getcwd()
27 27 while not os.path.isdir(os.path.join(p, ".hg")):
28 28 oldp = p
29 29 p = os.path.dirname(p)
30 30 if p == oldp:
31 31 raise repo.RepoError(_("There is no Mercurial repository"
32 32 " here (.hg not found)"))
33 33 path = p
34 34
35 35 self.path = os.path.join(path, ".hg")
36 36 self.root = os.path.realpath(path)
37 37 self.origroot = path
38 38 self.opener = util.opener(self.path)
39 39 self.wopener = util.opener(self.root)
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 os.mkdir(os.path.join(self.path, "store"))
47 47 requirements = ("revlogv1", "store")
48 48 reqfile = self.opener("requires", "w")
49 49 for r in requirements:
50 50 reqfile.write("%s\n" % r)
51 51 reqfile.close()
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 else:
58 58 raise repo.RepoError(_("repository %s not found") % path)
59 59 elif create:
60 60 raise repo.RepoError(_("repository %s already exists") % path)
61 61 else:
62 62 # find requirements
63 63 try:
64 64 requirements = self.opener("requires").read().splitlines()
65 65 except IOError, inst:
66 66 if inst.errno != errno.ENOENT:
67 67 raise
68 68 requirements = []
69 69 # check them
70 70 for r in requirements:
71 71 if r not in self.supported:
72 72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73 73
74 74 # setup store
75 75 if "store" in requirements:
76 76 self.encodefn = util.encodefilename
77 77 self.decodefn = util.decodefilename
78 78 self.spath = os.path.join(self.path, "store")
79 79 else:
80 80 self.encodefn = lambda x: x
81 81 self.decodefn = lambda x: x
82 82 self.spath = self.path
83 83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84 84
85 85 self.ui = ui.ui(parentui=parentui)
86 86 try:
87 87 self.ui.readconfig(self.join("hgrc"), self.root)
88 88 except IOError:
89 89 pass
90 90
91 91 v = self.ui.configrevlog()
92 92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 94 fl = v.get('flags', None)
95 95 flags = 0
96 96 if fl != None:
97 97 for x in fl.split():
98 98 flags |= revlog.flagstr(x)
99 99 elif self.revlogv1:
100 100 flags = revlog.REVLOG_DEFAULT_FLAGS
101 101
102 102 v = self.revlogversion | flags
103 103 self.manifest = manifest.manifest(self.sopener, v)
104 104 self.changelog = changelog.changelog(self.sopener, v)
105 105
106 106 fallback = self.ui.config('ui', 'fallbackencoding')
107 107 if fallback:
108 108 util._fallbackencoding = fallback
109 109
110 110 # the changelog might not have the inline index flag
111 111 # on. If the format of the changelog is the same as found in
112 112 # .hgrc, apply any flags found in the .hgrc as well.
113 113         # Otherwise, just use the version found in the changelog
114 114 v = self.changelog.version
115 115 if v == self.revlogversion:
116 116 v |= flags
117 117 self.revlogversion = v
118 118
119 119 self.tagscache = None
120 120 self.branchcache = None
121 121 self.nodetagscache = None
122 122 self.encodepats = None
123 123 self.decodepats = None
124 124 self.transhandle = None
125 125
126 126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127 127
128 128 def url(self):
129 129 return 'file:' + self.root
130 130
131 131 def hook(self, name, throw=False, **args):
132 132 def callhook(hname, funcname):
133 133 '''call python hook. hook is callable object, looked up as
134 134 name in python module. if callable returns "true", hook
135 135 fails, else passes. if hook raises exception, treated as
136 136 hook failure. exception propagates if throw is "true".
137 137
138 138 reason for "true" meaning "hook failed" is so that
139 139 unmodified commands (e.g. mercurial.commands.update) can
140 140 be run as hooks without wrappers to convert return values.'''
141 141
142 142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 143 d = funcname.rfind('.')
144 144 if d == -1:
145 145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 146 % (hname, funcname))
147 147 modname = funcname[:d]
148 148 try:
149 149 obj = __import__(modname)
150 150 except ImportError:
151 151 try:
152 152 # extensions are loaded with hgext_ prefix
153 153 obj = __import__("hgext_%s" % modname)
154 154 except ImportError:
155 155 raise util.Abort(_('%s hook is invalid '
156 156 '(import of "%s" failed)') %
157 157 (hname, modname))
158 158 try:
159 159 for p in funcname.split('.')[1:]:
160 160 obj = getattr(obj, p)
161 161 except AttributeError, err:
162 162 raise util.Abort(_('%s hook is invalid '
163 163 '("%s" is not defined)') %
164 164 (hname, funcname))
165 165 if not callable(obj):
166 166 raise util.Abort(_('%s hook is invalid '
167 167 '("%s" is not callable)') %
168 168 (hname, funcname))
169 169 try:
170 170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 171 except (KeyboardInterrupt, util.SignalInterrupt):
172 172 raise
173 173 except Exception, exc:
174 174 if isinstance(exc, util.Abort):
175 175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 176 (hname, exc.args[0]))
177 177 else:
178 178 self.ui.warn(_('error: %s hook raised an exception: '
179 179 '%s\n') % (hname, exc))
180 180 if throw:
181 181 raise
182 182 self.ui.print_exc()
183 183 return True
184 184 if r:
185 185 if throw:
186 186 raise util.Abort(_('%s hook failed') % hname)
187 187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 188 return r
189 189
190 190 def runhook(name, cmd):
191 191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 193 r = util.system(cmd, environ=env, cwd=self.root)
194 194 if r:
195 195 desc, r = util.explain_exit(r)
196 196 if throw:
197 197 raise util.Abort(_('%s hook %s') % (name, desc))
198 198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 199 return r
200 200
201 201 r = False
202 202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 203 if hname.split(".", 1)[0] == name and cmd]
204 204 hooks.sort()
205 205 for hname, cmd in hooks:
206 206 if cmd.startswith('python:'):
207 207 r = callhook(hname, cmd[7:].strip()) or r
208 208 else:
209 209 r = runhook(hname, cmd) or r
210 210 return r
211 211
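For the shell-hook path above, the hook's keyword arguments are exported as HG_* environment variables. A small sketch of that mapping (the argument values are made up):

    # Hook kwargs become HG_<NAME> environment variables for shell hooks.
    args = {'node': 'abc123', 'source': 'push'}
    env = dict([('HG_' + k.upper(), v) for k, v in args.items()])
    assert env == {'HG_NODE': 'abc123', 'HG_SOURCE': 'push'}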
212 212 tag_disallowed = ':\r\n'
213 213
214 214 def tag(self, name, node, message, local, user, date):
215 215 '''tag a revision with a symbolic name.
216 216
217 217 if local is True, the tag is stored in a per-repository file.
218 218 otherwise, it is stored in the .hgtags file, and a new
219 219 changeset is committed with the change.
220 220
221 221 keyword arguments:
222 222
223 223 local: whether to store tag in non-version-controlled file
224 224 (default False)
225 225
226 226 message: commit message to use if committing
227 227
228 228 user: name of user to use if committing
229 229
230 230 date: date tuple to use if committing'''
231 231
232 232 for c in self.tag_disallowed:
233 233 if c in name:
234 234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235 235
236 236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237 237
238 238 if local:
239 239 # local tags are stored in the current charset
240 240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 241 self.hook('tag', node=hex(node), tag=name, local=local)
242 242 return
243 243
244 244 for x in self.status()[:5]:
245 245 if '.hgtags' in x:
246 246 raise util.Abort(_('working copy of .hgtags is changed '
247 247 '(please commit .hgtags manually)'))
248 248
249 249 # committed tags are stored in UTF-8
250 250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 251 self.wfile('.hgtags', 'ab').write(line)
252 252 if self.dirstate.state('.hgtags') == '?':
253 253 self.add(['.hgtags'])
254 254
255 255 self.commit(['.hgtags'], message, user, date)
256 256 self.hook('tag', node=hex(node), tag=name, local=local)
257 257
258 258 def tags(self):
259 259 '''return a mapping of tag to node'''
260 260 if not self.tagscache:
261 261 self.tagscache = {}
262 262
263 263 def parsetag(line, context):
264 264 if not line:
265 265 return
266 266                 s = line.split(" ", 1)
267 267 if len(s) != 2:
268 268 self.ui.warn(_("%s: cannot parse entry\n") % context)
269 269 return
270 270 node, key = s
271 271 key = util.tolocal(key.strip()) # stored in UTF-8
272 272 try:
273 273 bin_n = bin(node)
274 274 except TypeError:
275 275 self.ui.warn(_("%s: node '%s' is not well formed\n") %
276 276 (context, node))
277 277 return
278 278 if bin_n not in self.changelog.nodemap:
279 279 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
280 280 (context, key))
281 281 return
282 282 self.tagscache[key] = bin_n
283 283
284 284 # read the tags file from each head, ending with the tip,
285 285 # and add each tag found to the map, with "newer" ones
286 286 # taking precedence
287 287 f = None
288 288 for rev, node, fnode in self._hgtagsnodes():
289 289 f = (f and f.filectx(fnode) or
290 290 self.filectx('.hgtags', fileid=fnode))
291 291 count = 0
292 292 for l in f.data().splitlines():
293 293 count += 1
294 294 parsetag(l, _("%s, line %d") % (str(f), count))
295 295
296 296 try:
297 297 f = self.opener("localtags")
298 298 count = 0
299 299 for l in f:
300 300 # localtags are stored in the local character set
301 301 # while the internal tag table is stored in UTF-8
302 302 l = util.fromlocal(l)
303 303 count += 1
304 304 parsetag(l, _("localtags, line %d") % count)
305 305 except IOError:
306 306 pass
307 307
308 308 self.tagscache['tip'] = self.changelog.tip()
309 309
310 310 return self.tagscache
311 311
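parsetag() above consumes one entry per line: 40 hex characters of node, a single space, then the tag name (which may itself contain spaces). A minimal sketch of that format, with a made-up node value:

    # .hgtags / localtags line format: '<40-hex node> <tag name>'.
    line = '0123456789abcdef0123456789abcdef01234567 release 1.0'
    node, key = line.split(' ', 1)
    assert len(node) == 40
    assert key.strip() == 'release 1.0'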
312 312 def _hgtagsnodes(self):
313 313 heads = self.heads()
314 314 heads.reverse()
315 315 last = {}
316 316 ret = []
317 317 for node in heads:
318 318 c = self.changectx(node)
319 319 rev = c.rev()
320 320 try:
321 321 fnode = c.filenode('.hgtags')
322 322 except repo.LookupError:
323 323 continue
324 324 ret.append((rev, node, fnode))
325 325 if fnode in last:
326 326 ret[last[fnode]] = None
327 327 last[fnode] = len(ret) - 1
328 328 return [item for item in ret if item]
329 329
330 330 def tagslist(self):
331 331 '''return a list of tags ordered by revision'''
332 332 l = []
333 333 for t, n in self.tags().items():
334 334 try:
335 335 r = self.changelog.rev(n)
336 336 except:
337 337 r = -2 # sort to the beginning of the list if unknown
338 338 l.append((r, t, n))
339 339 l.sort()
340 340 return [(t, n) for r, t, n in l]
341 341
342 342 def nodetags(self, node):
343 343 '''return the tags associated with a node'''
344 344 if not self.nodetagscache:
345 345 self.nodetagscache = {}
346 346 for t, n in self.tags().items():
347 347 self.nodetagscache.setdefault(n, []).append(t)
348 348 return self.nodetagscache.get(node, [])
349 349
350 350 def _branchtags(self):
351 351 partial, last, lrev = self._readbranchcache()
352 352
353 353 tiprev = self.changelog.count() - 1
354 354 if lrev != tiprev:
355 355 self._updatebranchcache(partial, lrev+1, tiprev+1)
356 356 self._writebranchcache(partial, self.changelog.tip(), tiprev)
357 357
358 358 return partial
359 359
360 360 def branchtags(self):
361 361 if self.branchcache is not None:
362 362 return self.branchcache
363 363
364 364 self.branchcache = {} # avoid recursion in changectx
365 365 partial = self._branchtags()
366 366
367 367 # the branch cache is stored on disk as UTF-8, but in the local
368 368 # charset internally
369 369 for k, v in partial.items():
370 370 self.branchcache[util.tolocal(k)] = v
371 371 return self.branchcache
372 372
373 373 def _readbranchcache(self):
374 374 partial = {}
375 375 try:
376 376 f = self.opener("branches.cache")
377 377 lines = f.read().split('\n')
378 378 f.close()
379 379 last, lrev = lines.pop(0).rstrip().split(" ", 1)
380 380 last, lrev = bin(last), int(lrev)
381 381 if not (lrev < self.changelog.count() and
382 382 self.changelog.node(lrev) == last): # sanity check
383 383 # invalidate the cache
384 384 raise ValueError('Invalid branch cache: unknown tip')
385 385 for l in lines:
386 386 if not l: continue
387 387 node, label = l.rstrip().split(" ", 1)
388 388 partial[label] = bin(node)
389 389 except (KeyboardInterrupt, util.SignalInterrupt):
390 390 raise
391 391 except Exception, inst:
392 392 if self.ui.debugflag:
393 393 self.ui.warn(str(inst), '\n')
394 394 partial, last, lrev = {}, nullid, nullrev
395 395 return partial, last, lrev
396 396
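The branches.cache layout used by the two methods above is one header line '<hex tip> <tip rev>' followed by one '<hex node> <branch label>' line per branch. A sketch of reading it back (hashes and rev are made up), omitting the bin() conversion and sanity checks:

    lines = ['ffff0000ffff0000ffff0000ffff0000ffff0000 42',
             '0123456789abcdef0123456789abcdef01234567 default']
    last, lrev = lines[0].rstrip().split(' ', 1)
    lrev = int(lrev)
    partial = {}
    for l in lines[1:]:
        node, label = l.rstrip().split(' ', 1)
        partial[label] = node
    assert lrev == 42 and 'default' in partial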
397 397 def _writebranchcache(self, branches, tip, tiprev):
398 398 try:
399 399 f = self.opener("branches.cache", "w")
400 400 f.write("%s %s\n" % (hex(tip), tiprev))
401 401 for label, node in branches.iteritems():
402 402 f.write("%s %s\n" % (hex(node), label))
403 403 except IOError:
404 404 pass
405 405
406 406 def _updatebranchcache(self, partial, start, end):
407 407 for r in xrange(start, end):
408 408 c = self.changectx(r)
409 409 b = c.branch()
410 410 if b:
411 411 partial[b] = c.node()
412 412
413 413 def lookup(self, key):
414 414 if key == '.':
415 415 key = self.dirstate.parents()[0]
416 416 if key == nullid:
417 417 raise repo.RepoError(_("no revision checked out"))
418 418 elif key == 'null':
419 419 return nullid
420 420 n = self.changelog._match(key)
421 421 if n:
422 422 return n
423 423 if key in self.tags():
424 424 return self.tags()[key]
425 425 if key in self.branchtags():
426 426 return self.branchtags()[key]
427 427 n = self.changelog._partialmatch(key)
428 428 if n:
429 429 return n
430 430 raise repo.RepoError(_("unknown revision '%s'") % key)
431 431
432 432 def dev(self):
433 433 return os.lstat(self.path).st_dev
434 434
435 435 def local(self):
436 436 return True
437 437
438 438 def join(self, f):
439 439 return os.path.join(self.path, f)
440 440
441 441 def sjoin(self, f):
442 442 f = self.encodefn(f)
443 443 return os.path.join(self.spath, f)
444 444
445 445 def wjoin(self, f):
446 446 return os.path.join(self.root, f)
447 447
448 448 def file(self, f):
449 449 if f[0] == '/':
450 450 f = f[1:]
451 451 return filelog.filelog(self.sopener, f, self.revlogversion)
452 452
453 453 def changectx(self, changeid=None):
454 454 return context.changectx(self, changeid)
455 455
456 456 def workingctx(self):
457 457 return context.workingctx(self)
458 458
459 459 def parents(self, changeid=None):
460 460 '''
461 461 get list of changectxs for parents of changeid or working directory
462 462 '''
463 463 if changeid is None:
464 464 pl = self.dirstate.parents()
465 465 else:
466 466 n = self.changelog.lookup(changeid)
467 467 pl = self.changelog.parents(n)
468 468 if pl[1] == nullid:
469 469 return [self.changectx(pl[0])]
470 470 return [self.changectx(pl[0]), self.changectx(pl[1])]
471 471
472 472 def filectx(self, path, changeid=None, fileid=None):
473 473 """changeid can be a changeset revision, node, or tag.
474 474 fileid can be a file revision or node."""
475 475 return context.filectx(self, path, changeid, fileid)
476 476
477 477 def getcwd(self):
478 478 return self.dirstate.getcwd()
479 479
480 480 def wfile(self, f, mode='r'):
481 481 return self.wopener(f, mode)
482 482
483 483 def wread(self, filename):
484 484 if self.encodepats == None:
485 485 l = []
486 486 for pat, cmd in self.ui.configitems("encode"):
487 487 mf = util.matcher(self.root, "", [pat], [], [])[1]
488 488 l.append((mf, cmd))
489 489 self.encodepats = l
490 490
491 491 data = self.wopener(filename, 'r').read()
492 492
493 493 for mf, cmd in self.encodepats:
494 494 if mf(filename):
495 495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
496 496 data = util.filter(data, cmd)
497 497 break
498 498
499 499 return data
500 500
501 501 def wwrite(self, filename, data, fd=None):
502 502 if self.decodepats == None:
503 503 l = []
504 504 for pat, cmd in self.ui.configitems("decode"):
505 505 mf = util.matcher(self.root, "", [pat], [], [])[1]
506 506 l.append((mf, cmd))
507 507 self.decodepats = l
508 508
509 509 for mf, cmd in self.decodepats:
510 510 if mf(filename):
511 511 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
512 512 data = util.filter(data, cmd)
513 513 break
514 514
515 515 if fd:
516 516 return fd.write(data)
517 517 return self.wopener(filename, 'w').write(data)
518 518
519 519 def transaction(self):
520 520 tr = self.transhandle
521 521 if tr != None and tr.running():
522 522 return tr.nest()
523 523
524 524 # save dirstate for rollback
525 525 try:
526 526 ds = self.opener("dirstate").read()
527 527 except IOError:
528 528 ds = ""
529 529 self.opener("journal.dirstate", "w").write(ds)
530 530
531 531 renames = [(self.sjoin("journal"), self.sjoin("undo")),
532 532 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
533 533 tr = transaction.transaction(self.ui.warn, self.sopener,
534 534 self.sjoin("journal"),
535 535 aftertrans(renames))
536 536 self.transhandle = tr
537 537 return tr
538 538
539 539 def recover(self):
540 540 l = self.lock()
541 541 if os.path.exists(self.sjoin("journal")):
542 542 self.ui.status(_("rolling back interrupted transaction\n"))
543 543 transaction.rollback(self.sopener, self.sjoin("journal"))
544 544 self.reload()
545 545 return True
546 546 else:
547 547 self.ui.warn(_("no interrupted transaction available\n"))
548 548 return False
549 549
550 550 def rollback(self, wlock=None):
551 551 if not wlock:
552 552 wlock = self.wlock()
553 553 l = self.lock()
554 554 if os.path.exists(self.sjoin("undo")):
555 555 self.ui.status(_("rolling back last transaction\n"))
556 556 transaction.rollback(self.sopener, self.sjoin("undo"))
557 557 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
558 558 self.reload()
559 559 self.wreload()
560 560 else:
561 561 self.ui.warn(_("no rollback information available\n"))
562 562
563 563 def wreload(self):
564 564 self.dirstate.read()
565 565
566 566 def reload(self):
567 567 self.changelog.load()
568 568 self.manifest.load()
569 569 self.tagscache = None
570 570 self.nodetagscache = None
571 571
572 572 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
573 573 desc=None):
574 574 try:
575 575 l = lock.lock(lockname, 0, releasefn, desc=desc)
576 576 except lock.LockHeld, inst:
577 577 if not wait:
578 578 raise
579 579 self.ui.warn(_("waiting for lock on %s held by %r\n") %
580 580 (desc, inst.locker))
581 581 # default to 600 seconds timeout
582 582 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
583 583 releasefn, desc=desc)
584 584 if acquirefn:
585 585 acquirefn()
586 586 return l
587 587
588 588 def lock(self, wait=1):
589 589 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
590 590 desc=_('repository %s') % self.origroot)
591 591
592 592 def wlock(self, wait=1):
593 593 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
594 594 self.wreload,
595 595 desc=_('working directory of %s') % self.origroot)
596 596
597 597 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
598 598 """
599 599 commit an individual file as part of a larger transaction
600 600 """
601 601
602 602 t = self.wread(fn)
603 603 fl = self.file(fn)
604 604 fp1 = manifest1.get(fn, nullid)
605 605 fp2 = manifest2.get(fn, nullid)
606 606
607 607 meta = {}
608 608 cp = self.dirstate.copied(fn)
609 609 if cp:
610 610 meta["copy"] = cp
611 611 if not manifest2: # not a branch merge
612 612 meta["copyrev"] = hex(manifest1.get(cp, nullid))
613 613 fp2 = nullid
614 614 elif fp2 != nullid: # copied on remote side
615 615 meta["copyrev"] = hex(manifest1.get(cp, nullid))
616 616 elif fp1 != nullid: # copied on local side, reversed
617 617 meta["copyrev"] = hex(manifest2.get(cp))
618 618 fp2 = nullid
619 619 else: # directory rename
620 620 meta["copyrev"] = hex(manifest1.get(cp, nullid))
621 621 self.ui.debug(_(" %s: copy %s:%s\n") %
622 622 (fn, cp, meta["copyrev"]))
623 623 fp1 = nullid
624 624 elif fp2 != nullid:
625 625 # is one parent an ancestor of the other?
626 626 fpa = fl.ancestor(fp1, fp2)
627 627 if fpa == fp1:
628 628 fp1, fp2 = fp2, nullid
629 629 elif fpa == fp2:
630 630 fp2 = nullid
631 631
632 632 # is the file unmodified from the parent? report existing entry
633 633 if fp2 == nullid and not fl.cmp(fp1, t):
634 634 return fp1
635 635
636 636 changelist.append(fn)
637 637 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
638 638
639 639 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
640 640 if p1 is None:
641 641 p1, p2 = self.dirstate.parents()
642 642 return self.commit(files=files, text=text, user=user, date=date,
643 643 p1=p1, p2=p2, wlock=wlock)
644 644
645 645 def commit(self, files=None, text="", user=None, date=None,
646 646 match=util.always, force=False, lock=None, wlock=None,
647 647 force_editor=False, p1=None, p2=None, extra={}):
648 648
649 649 commit = []
650 650 remove = []
651 651 changed = []
652 652 use_dirstate = (p1 is None) # not rawcommit
653 653 extra = extra.copy()
654 654
655 655 if use_dirstate:
656 656 if files:
657 657 for f in files:
658 658 s = self.dirstate.state(f)
659 659 if s in 'nmai':
660 660 commit.append(f)
661 661 elif s == 'r':
662 662 remove.append(f)
663 663 else:
664 664 self.ui.warn(_("%s not tracked!\n") % f)
665 665 else:
666 666 changes = self.status(match=match)[:5]
667 667 modified, added, removed, deleted, unknown = changes
668 668 commit = modified + added
669 669 remove = removed
670 670 else:
671 671 commit = files
672 672
673 673 if use_dirstate:
674 674 p1, p2 = self.dirstate.parents()
675 675 update_dirstate = True
676 676 else:
677 677 p1, p2 = p1, p2 or nullid
678 678 update_dirstate = (self.dirstate.parents()[0] == p1)
679 679
680 680 c1 = self.changelog.read(p1)
681 681 c2 = self.changelog.read(p2)
682 682 m1 = self.manifest.read(c1[0]).copy()
683 683 m2 = self.manifest.read(c2[0])
684 684
685 685 if use_dirstate:
686 686 branchname = self.workingctx().branch()
687 687 try:
688 688 branchname = branchname.decode('UTF-8').encode('UTF-8')
689 689 except UnicodeDecodeError:
690 690 raise util.Abort(_('branch name not in UTF-8!'))
691 691 else:
692 692 branchname = ""
693 693
694 694 if use_dirstate:
695 695 oldname = c1[5].get("branch", "") # stored in UTF-8
696 696 if not commit and not remove and not force and p2 == nullid and \
697 697 branchname == oldname:
698 698 self.ui.status(_("nothing changed\n"))
699 699 return None
700 700
701 701 xp1 = hex(p1)
702 702 if p2 == nullid: xp2 = ''
703 703 else: xp2 = hex(p2)
704 704
705 705 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
706 706
707 707 if not wlock:
708 708 wlock = self.wlock()
709 709 if not lock:
710 710 lock = self.lock()
711 711 tr = self.transaction()
712 712
713 713 # check in files
714 714 new = {}
715 715 linkrev = self.changelog.count()
716 716 commit.sort()
717 717 for f in commit:
718 718 self.ui.note(f + "\n")
719 719 try:
720 720 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
721 721 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
722 722 except IOError:
723 723 if use_dirstate:
724 724 self.ui.warn(_("trouble committing %s!\n") % f)
725 725 raise
726 726 else:
727 727 remove.append(f)
728 728
729 729 # update manifest
730 730 m1.update(new)
731 731 remove.sort()
732 732
733 733 for f in remove:
734 734 if f in m1:
735 735 del m1[f]
736 736 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
737 737
738 738 # add changeset
739 739 new = new.keys()
740 740 new.sort()
741 741
742 742 user = user or self.ui.username()
743 743 if not text or force_editor:
744 744 edittext = []
745 745 if text:
746 746 edittext.append(text)
747 747 edittext.append("")
748 748 edittext.append("HG: user: %s" % user)
749 749 if p2 != nullid:
750 750 edittext.append("HG: branch merge")
751 751 edittext.extend(["HG: changed %s" % f for f in changed])
752 752 edittext.extend(["HG: removed %s" % f for f in remove])
753 753 if not changed and not remove:
754 754 edittext.append("HG: no files changed")
755 755 edittext.append("")
756 756 # run editor in the repository root
757 757 olddir = os.getcwd()
758 758 os.chdir(self.root)
759 759 text = self.ui.edit("\n".join(edittext), user)
760 760 os.chdir(olddir)
761 761
762 762 lines = [line.rstrip() for line in text.rstrip().splitlines()]
763 763 while lines and not lines[0]:
764 764 del lines[0]
765 765 if not lines:
766 766 return None
767 767 text = '\n'.join(lines)
768 768 if branchname:
769 769 extra["branch"] = branchname
770 770 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
771 771 user, date, extra)
772 772 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
773 773 parent2=xp2)
774 774 tr.close()
775 775
776 776 if use_dirstate or update_dirstate:
777 777 self.dirstate.setparents(n)
778 778 if use_dirstate:
779 779 self.dirstate.update(new, "n")
780 780 self.dirstate.forget(remove)
781 781
782 782 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
783 783 return n
784 784
785 785 def walk(self, node=None, files=[], match=util.always, badmatch=None):
786 786 '''
787 787 walk recursively through the directory tree or a given
788 788 changeset, finding all files matched by the match
789 789 function
790 790
791 791 results are yielded in a tuple (src, filename), where src
792 792 is one of:
793 793 'f' the file was found in the directory tree
794 794 'm' the file was only in the dirstate and not in the tree
795 795 'b' file was not found and matched badmatch
796 796 '''
797 797
798 798 if node:
799 799 fdict = dict.fromkeys(files)
800 800 for fn in self.manifest.read(self.changelog.read(node)[0]):
801 801 for ffn in fdict:
802 802 # match if the file is the exact name or a directory
803 803 if ffn == fn or fn.startswith("%s/" % ffn):
804 804 del fdict[ffn]
805 805 break
806 806 if match(fn):
807 807 yield 'm', fn
808 808 for fn in fdict:
809 809 if badmatch and badmatch(fn):
810 810 if match(fn):
811 811 yield 'b', fn
812 812 else:
813 813 self.ui.warn(_('%s: No such file in rev %s\n') % (
814 814 util.pathto(self.getcwd(), fn), short(node)))
815 815 else:
816 816 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
817 817 yield src, fn
818 818
819 819 def status(self, node1=None, node2=None, files=[], match=util.always,
820 820 wlock=None, list_ignored=False, list_clean=False):
821 821 """return status of files between two nodes or node and working directory
822 822
823 823 If node1 is None, use the first dirstate parent instead.
824 824 If node2 is None, compare node1 with working directory.
825 825 """
826 826
827 827 def fcmp(fn, mf):
828 828 t1 = self.wread(fn)
829 829 return self.file(fn).cmp(mf.get(fn, nullid), t1)
830 830
831 831 def mfmatches(node):
832 832 change = self.changelog.read(node)
833 833 mf = self.manifest.read(change[0]).copy()
834 834 for fn in mf.keys():
835 835 if not match(fn):
836 836 del mf[fn]
837 837 return mf
838 838
839 839 modified, added, removed, deleted, unknown = [], [], [], [], []
840 840 ignored, clean = [], []
841 841
842 842 compareworking = False
843 843 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
844 844 compareworking = True
845 845
846 846 if not compareworking:
847 847 # read the manifest from node1 before the manifest from node2,
848 848 # so that we'll hit the manifest cache if we're going through
849 849 # all the revisions in parent->child order.
850 850 mf1 = mfmatches(node1)
851 851
852 852 # are we comparing the working directory?
853 853 if not node2:
854 854 if not wlock:
855 855 try:
856 856 wlock = self.wlock(wait=0)
857 857 except lock.LockException:
858 858 wlock = None
859 859 (lookup, modified, added, removed, deleted, unknown,
860 860 ignored, clean) = self.dirstate.status(files, match,
861 861 list_ignored, list_clean)
862 862
863 863 # are we comparing working dir against its parent?
864 864 if compareworking:
865 865 if lookup:
866 866 # do a full compare of any files that might have changed
867 867 mf2 = mfmatches(self.dirstate.parents()[0])
868 868 for f in lookup:
869 869 if fcmp(f, mf2):
870 870 modified.append(f)
871 871 else:
872 872 clean.append(f)
873 873 if wlock is not None:
874 874 self.dirstate.update([f], "n")
875 875 else:
876 876 # we are comparing working dir against non-parent
877 877 # generate a pseudo-manifest for the working dir
878 878 # XXX: create it in dirstate.py ?
879 879 mf2 = mfmatches(self.dirstate.parents()[0])
880 880 for f in lookup + modified + added:
881 881 mf2[f] = ""
882 882 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
883 883 for f in removed:
884 884 if f in mf2:
885 885 del mf2[f]
886 886 else:
887 887 # we are comparing two revisions
888 888 mf2 = mfmatches(node2)
889 889
890 890 if not compareworking:
891 891 # flush lists from dirstate before comparing manifests
892 892 modified, added, clean = [], [], []
893 893
894 894 # make sure to sort the files so we talk to the disk in a
895 895 # reasonable order
896 896 mf2keys = mf2.keys()
897 897 mf2keys.sort()
898 898 for fn in mf2keys:
899 899 if mf1.has_key(fn):
900 900 if mf1.flags(fn) != mf2.flags(fn) or \
901 901 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
902 902 modified.append(fn)
903 903 elif list_clean:
904 904 clean.append(fn)
905 905 del mf1[fn]
906 906 else:
907 907 added.append(fn)
908 908
909 909 removed = mf1.keys()
910 910
911 911 # sort and return results:
912 912 for l in modified, added, removed, deleted, unknown, ignored, clean:
913 913 l.sort()
914 914 return (modified, added, removed, deleted, unknown, ignored, clean)
915 915
916 916 def add(self, list, wlock=None):
917 917 if not wlock:
918 918 wlock = self.wlock()
919 919 for f in list:
920 920 p = self.wjoin(f)
921 921 if not os.path.exists(p):
922 922 self.ui.warn(_("%s does not exist!\n") % f)
923 923 elif not os.path.isfile(p):
924 924 self.ui.warn(_("%s not added: only files supported currently\n")
925 925 % f)
926 926 elif self.dirstate.state(f) in 'an':
927 927 self.ui.warn(_("%s already tracked!\n") % f)
928 928 else:
929 929 self.dirstate.update([f], "a")
930 930
931 931 def forget(self, list, wlock=None):
932 932 if not wlock:
933 933 wlock = self.wlock()
934 934 for f in list:
935 935 if self.dirstate.state(f) not in 'ai':
936 936 self.ui.warn(_("%s not added!\n") % f)
937 937 else:
938 938 self.dirstate.forget([f])
939 939
940 940 def remove(self, list, unlink=False, wlock=None):
941 941 if unlink:
942 942 for f in list:
943 943 try:
944 944 util.unlink(self.wjoin(f))
945 945 except OSError, inst:
946 946 if inst.errno != errno.ENOENT:
947 947 raise
948 948 if not wlock:
949 949 wlock = self.wlock()
950 950 for f in list:
951 951 p = self.wjoin(f)
952 952 if os.path.exists(p):
953 953 self.ui.warn(_("%s still exists!\n") % f)
954 954 elif self.dirstate.state(f) == 'a':
955 955 self.dirstate.forget([f])
956 956 elif f not in self.dirstate:
957 957 self.ui.warn(_("%s not tracked!\n") % f)
958 958 else:
959 959 self.dirstate.update([f], "r")
960 960
961 961 def undelete(self, list, wlock=None):
962 962 p = self.dirstate.parents()[0]
963 963 mn = self.changelog.read(p)[0]
964 964 m = self.manifest.read(mn)
965 965 if not wlock:
966 966 wlock = self.wlock()
967 967 for f in list:
968 968 if self.dirstate.state(f) not in "r":
969 969                 self.ui.warn(_("%s not removed!\n") % f)
970 970 else:
971 971 t = self.file(f).read(m[f])
972 972 self.wwrite(f, t)
973 973 util.set_exec(self.wjoin(f), m.execf(f))
974 974 self.dirstate.update([f], "n")
975 975
976 976 def copy(self, source, dest, wlock=None):
977 977 p = self.wjoin(dest)
978 978 if not os.path.exists(p):
979 979 self.ui.warn(_("%s does not exist!\n") % dest)
980 980 elif not os.path.isfile(p):
981 981 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
982 982 else:
983 983 if not wlock:
984 984 wlock = self.wlock()
985 985 if self.dirstate.state(dest) == '?':
986 986 self.dirstate.update([dest], "a")
987 987 self.dirstate.copy(source, dest)
988 988
989 989 def heads(self, start=None):
990 990 heads = self.changelog.heads(start)
991 991 # sort the output in rev descending order
992 992 heads = [(-self.changelog.rev(h), h) for h in heads]
993 993 heads.sort()
994 994 return [n for (r, n) in heads]
995 995
996 996 # branchlookup returns a dict giving a list of branches for
997 997 # each head. A branch is defined as the tag of a node or
998 998 # the branch of the node's parents. If a node has multiple
999 999 # branch tags, tags are eliminated if they are visible from other
1000 1000 # branch tags.
1001 1001 #
1002 1002 # So, for this graph: a->b->c->d->e
1003 1003 # \ /
1004 1004 # aa -----/
1005 1005 # a has tag 2.6.12
1006 1006 # d has tag 2.6.13
1007 1007 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1008 1008     # for 2.6.12 can be reached from the node for 2.6.13, it is eliminated
1009 1009 # from the list.
1010 1010 #
1011 1011 # It is possible that more than one head will have the same branch tag.
1012 1012 # callers need to check the result for multiple heads under the same
1013 1013 # branch tag if that is a problem for them (ie checkout of a specific
1014 1014 # branch).
1015 1015 #
1016 1016 # passing in a specific branch will limit the depth of the search
1017 1017 # through the parents. It won't limit the branches returned in the
1018 1018 # result though.
1019 1019 def branchlookup(self, heads=None, branch=None):
1020 1020 if not heads:
1021 1021 heads = self.heads()
1022 1022         headt = list(heads)
1023 1023 chlog = self.changelog
1024 1024 branches = {}
1025 1025 merges = []
1026 1026 seenmerge = {}
1027 1027
1028 1028 # traverse the tree once for each head, recording in the branches
1029 1029 # dict which tags are visible from this head. The branches
1030 1030 # dict also records which tags are visible from each tag
1031 1031 # while we traverse.
1032 1032 while headt or merges:
1033 1033 if merges:
1034 1034 n, found = merges.pop()
1035 1035 visit = [n]
1036 1036 else:
1037 1037 h = headt.pop()
1038 1038 visit = [h]
1039 1039 found = [h]
1040 1040 seen = {}
1041 1041 while visit:
1042 1042 n = visit.pop()
1043 1043 if n in seen:
1044 1044 continue
1045 1045 pp = chlog.parents(n)
1046 1046 tags = self.nodetags(n)
1047 1047 if tags:
1048 1048 for x in tags:
1049 1049 if x == 'tip':
1050 1050 continue
1051 1051 for f in found:
1052 1052 branches.setdefault(f, {})[n] = 1
1053 1053 branches.setdefault(n, {})[n] = 1
1054 1054 break
1055 1055 if n not in found:
1056 1056 found.append(n)
1057 1057 if branch in tags:
1058 1058 continue
1059 1059 seen[n] = 1
1060 1060 if pp[1] != nullid and n not in seenmerge:
1061 1061 merges.append((pp[1], [x for x in found]))
1062 1062 seenmerge[n] = 1
1063 1063 if pp[0] != nullid:
1064 1064 visit.append(pp[0])
1065 1065 # traverse the branches dict, eliminating branch tags from each
1066 1066 # head that are visible from another branch tag for that head.
1067 1067 out = {}
1068 1068 viscache = {}
1069 1069 for h in heads:
1070 1070 def visible(node):
1071 1071 if node in viscache:
1072 1072 return viscache[node]
1073 1073 ret = {}
1074 1074 visit = [node]
1075 1075 while visit:
1076 1076 x = visit.pop()
1077 1077 if x in viscache:
1078 1078 ret.update(viscache[x])
1079 1079 elif x not in ret:
1080 1080 ret[x] = 1
1081 1081 if x in branches:
1082 1082 visit[len(visit):] = branches[x].keys()
1083 1083 viscache[node] = ret
1084 1084 return ret
1085 1085 if h not in branches:
1086 1086 continue
1087 1087 # O(n^2), but somewhat limited. This only searches the
1088 1088 # tags visible from a specific head, not all the tags in the
1089 1089 # whole repo.
1090 1090 for b in branches[h]:
1091 1091 vis = False
1092 1092 for bb in branches[h].keys():
1093 1093 if b != bb:
1094 1094 if b in visible(bb):
1095 1095 vis = True
1096 1096 break
1097 1097 if not vis:
1098 1098 l = out.setdefault(h, [])
1099 1099 l[len(l):] = self.nodetags(b)
1100 1100 return out
1101 1101
1102 1102 def branches(self, nodes):
1103 1103 if not nodes:
1104 1104 nodes = [self.changelog.tip()]
1105 1105 b = []
1106 1106 for n in nodes:
1107 1107 t = n
1108 1108 while 1:
1109 1109 p = self.changelog.parents(n)
1110 1110 if p[1] != nullid or p[0] == nullid:
1111 1111 b.append((t, n, p[0], p[1]))
1112 1112 break
1113 1113 n = p[0]
1114 1114 return b
1115 1115
1116 1116 def between(self, pairs):
1117 1117 r = []
1118 1118
1119 1119 for top, bottom in pairs:
1120 1120 n, l, i = top, [], 0
1121 1121 f = 1
1122 1122
1123 1123 while n != bottom:
1124 1124 p = self.changelog.parents(n)[0]
1125 1125 if i == f:
1126 1126 l.append(n)
1127 1127 f = f * 2
1128 1128 n = p
1129 1129 i += 1
1130 1130
1131 1131 r.append(l)
1132 1132
1133 1133 return r
1134 1134
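between() above samples the path from top toward bottom at distances 1, 2, 4, 8, ... from the top, which is what gives findincoming's binary search its logarithmic number of probe points. A self-contained sketch of the same loop on a toy linear history:

    def sample(top, bottom, parent):
        # Same doubling walk as between(): record the node whenever the
        # distance walked so far equals f, then double f.
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = parent[n]
            if i == f:
                l.append(n)
                f *= 2
            n = p
            i += 1
        return l

    parent = dict((x, x - 1) for x in range(1, 10))  # chain 9 -> 8 -> ... -> 0
    assert sample(9, 0, parent) == [8, 7, 5, 1]      # distances 1, 2, 4, 8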
1135 1135 def findincoming(self, remote, base=None, heads=None, force=False):
1136 1136 """Return list of roots of the subsets of missing nodes from remote
1137 1137
1138 1138 If base dict is specified, assume that these nodes and their parents
1139 1139 exist on the remote side and that no child of a node of base exists
1140 1140 in both remote and self.
1141 1141         Furthermore, base will be updated to include the nodes that exist
1142 1142         in both self and remote but have no child that exists in both.
1143 1143 If a list of heads is specified, return only nodes which are heads
1144 1144 or ancestors of these heads.
1145 1145
1146 1146 All the ancestors of base are in self and in remote.
1147 1147 All the descendants of the list returned are missing in self.
1148 1148 (and so we know that the rest of the nodes are missing in remote, see
1149 1149 outgoing)
1150 1150 """
1151 1151 m = self.changelog.nodemap
1152 1152 search = []
1153 1153 fetch = {}
1154 1154 seen = {}
1155 1155 seenbranch = {}
1156 1156 if base == None:
1157 1157 base = {}
1158 1158
1159 1159 if not heads:
1160 1160 heads = remote.heads()
1161 1161
1162 1162 if self.changelog.tip() == nullid:
1163 1163 base[nullid] = 1
1164 1164 if heads != [nullid]:
1165 1165 return [nullid]
1166 1166 return []
1167 1167
1168 1168 # assume we're closer to the tip than the root
1169 1169 # and start by examining the heads
1170 1170 self.ui.status(_("searching for changes\n"))
1171 1171
1172 1172 unknown = []
1173 1173 for h in heads:
1174 1174 if h not in m:
1175 1175 unknown.append(h)
1176 1176 else:
1177 1177 base[h] = 1
1178 1178
1179 1179 if not unknown:
1180 1180 return []
1181 1181
1182 1182 req = dict.fromkeys(unknown)
1183 1183 reqcnt = 0
1184 1184
1185 1185 # search through remote branches
1186 1186 # a 'branch' here is a linear segment of history, with four parts:
1187 1187 # head, root, first parent, second parent
1188 1188 # (a branch always has two parents (or none) by definition)
1189 1189 unknown = remote.branches(unknown)
1190 1190 while unknown:
1191 1191 r = []
1192 1192 while unknown:
1193 1193 n = unknown.pop(0)
1194 1194 if n[0] in seen:
1195 1195 continue
1196 1196
1197 1197 self.ui.debug(_("examining %s:%s\n")
1198 1198 % (short(n[0]), short(n[1])))
1199 1199 if n[0] == nullid: # found the end of the branch
1200 1200 pass
1201 1201 elif n in seenbranch:
1202 1202 self.ui.debug(_("branch already found\n"))
1203 1203 continue
1204 1204 elif n[1] and n[1] in m: # do we know the base?
1205 1205 self.ui.debug(_("found incomplete branch %s:%s\n")
1206 1206 % (short(n[0]), short(n[1])))
1207 1207 search.append(n) # schedule branch range for scanning
1208 1208 seenbranch[n] = 1
1209 1209 else:
1210 1210 if n[1] not in seen and n[1] not in fetch:
1211 1211 if n[2] in m and n[3] in m:
1212 1212 self.ui.debug(_("found new changeset %s\n") %
1213 1213 short(n[1]))
1214 1214 fetch[n[1]] = 1 # earliest unknown
1215 1215 for p in n[2:4]:
1216 1216 if p in m:
1217 1217 base[p] = 1 # latest known
1218 1218
1219 1219 for p in n[2:4]:
1220 1220 if p not in req and p not in m:
1221 1221 r.append(p)
1222 1222 req[p] = 1
1223 1223 seen[n[0]] = 1
1224 1224
1225 1225 if r:
1226 1226 reqcnt += 1
1227 1227 self.ui.debug(_("request %d: %s\n") %
1228 1228 (reqcnt, " ".join(map(short, r))))
1229 1229 for p in xrange(0, len(r), 10):
1230 1230 for b in remote.branches(r[p:p+10]):
1231 1231 self.ui.debug(_("received %s:%s\n") %
1232 1232 (short(b[0]), short(b[1])))
1233 1233 unknown.append(b)
1234 1234
1235 1235 # do binary search on the branches we found
1236 1236 while search:
1237 1237 n = search.pop(0)
1238 1238 reqcnt += 1
1239 1239 l = remote.between([(n[0], n[1])])[0]
1240 1240 l.append(n[1])
1241 1241 p = n[0]
1242 1242 f = 1
1243 1243 for i in l:
1244 1244 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1245 1245 if i in m:
1246 1246 if f <= 2:
1247 1247 self.ui.debug(_("found new branch changeset %s\n") %
1248 1248 short(p))
1249 1249 fetch[p] = 1
1250 1250 base[i] = 1
1251 1251 else:
1252 1252 self.ui.debug(_("narrowed branch search to %s:%s\n")
1253 1253 % (short(p), short(i)))
1254 1254 search.append((p, i))
1255 1255 break
1256 1256 p, f = i, f * 2
1257 1257
1258 1258 # sanity check our fetch list
1259 1259 for f in fetch.keys():
1260 1260 if f in m:
1261 1261 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1262 1262
1263 1263 if base.keys() == [nullid]:
1264 1264 if force:
1265 1265 self.ui.warn(_("warning: repository is unrelated\n"))
1266 1266 else:
1267 1267 raise util.Abort(_("repository is unrelated"))
1268 1268
1269 1269 self.ui.debug(_("found new changesets starting at ") +
1270 1270 " ".join([short(f) for f in fetch]) + "\n")
1271 1271
1272 1272 self.ui.debug(_("%d total queries\n") % reqcnt)
1273 1273
1274 1274 return fetch.keys()
1275 1275
1276 1276 def findoutgoing(self, remote, base=None, heads=None, force=False):
1277 1277 """Return list of nodes that are roots of subsets not in remote
1278 1278
1279 1279 If base dict is specified, assume that these nodes and their parents
1280 1280 exist on the remote side.
1281 1281 If a list of heads is specified, return only nodes which are heads
1282 1282 or ancestors of these heads, and return a second element which
1283 1283 contains all remote heads which get new children.
1284 1284 """
1285 1285 if base == None:
1286 1286 base = {}
1287 1287 self.findincoming(remote, base, heads, force=force)
1288 1288
1289 1289 self.ui.debug(_("common changesets up to ")
1290 1290 + " ".join(map(short, base.keys())) + "\n")
1291 1291
1292 1292 remain = dict.fromkeys(self.changelog.nodemap)
1293 1293
1294 1294 # prune everything remote has from the tree
1295 1295 del remain[nullid]
1296 1296 remove = base.keys()
1297 1297 while remove:
1298 1298 n = remove.pop(0)
1299 1299 if n in remain:
1300 1300 del remain[n]
1301 1301 for p in self.changelog.parents(n):
1302 1302 remove.append(p)
1303 1303
1304 1304 # find every node whose parents have been pruned
1305 1305 subset = []
1306 1306 # find every remote head that will get new children
1307 1307 updated_heads = {}
1308 1308 for n in remain:
1309 1309 p1, p2 = self.changelog.parents(n)
1310 1310 if p1 not in remain and p2 not in remain:
1311 1311 subset.append(n)
1312 1312 if heads:
1313 1313 if p1 in heads:
1314 1314 updated_heads[p1] = True
1315 1315 if p2 in heads:
1316 1316 updated_heads[p2] = True
1317 1317
1318 1318 # this is the set of all roots we have to push
1319 1319 if heads:
1320 1320 return subset, updated_heads.keys()
1321 1321 else:
1322 1322 return subset
1323 1323
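The pruning above can be pictured on a toy graph: everything the remote is known to have (base and its ancestors) is removed from remain, and whatever is left with no surviving parent becomes a root of the outgoing subset. A minimal sketch (graph and names are illustrative, not Mercurial data structures):

    parents = {'base': [], 'x': ['base'], 'y': ['x']}
    remain = set(parents)
    remove = ['base']
    while remove:
        n = remove.pop(0)
        if n in remain:
            remain.discard(n)
            remove.extend(parents[n])
    roots = [n for n in remain
             if all(p not in remain for p in parents[n])]
    assert roots == ['x']   # 'y' is not a root: its parent 'x' survived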
1324 1324 def pull(self, remote, heads=None, force=False, lock=None):
1325 1325 mylock = False
1326 1326 if not lock:
1327 1327 lock = self.lock()
1328 1328 mylock = True
1329 1329
1330 1330 try:
1331 1331 fetch = self.findincoming(remote, force=force)
1332 1332 if fetch == [nullid]:
1333 1333 self.ui.status(_("requesting all changes\n"))
1334 1334
1335 1335 if not fetch:
1336 1336 self.ui.status(_("no changes found\n"))
1337 1337 return 0
1338 1338
1339 1339 if heads is None:
1340 1340 cg = remote.changegroup(fetch, 'pull')
1341 1341 else:
1342 1342 if 'changegroupsubset' not in remote.capabilities:
1343 1343                     raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1344 1344 cg = remote.changegroupsubset(fetch, heads, 'pull')
1345 1345 return self.addchangegroup(cg, 'pull', remote.url())
1346 1346 finally:
1347 1347 if mylock:
1348 1348 lock.release()
1349 1349
1350 1350 def push(self, remote, force=False, revs=None):
1351 1351 # there are two ways to push to remote repo:
1352 1352 #
1353 1353 # addchangegroup assumes local user can lock remote
1354 1354 # repo (local filesystem, old ssh servers).
1355 1355 #
1356 1356 # unbundle assumes local user cannot lock remote repo (new ssh
1357 1357 # servers, http servers).
1358 1358
1359 1359 if remote.capable('unbundle'):
1360 1360 return self.push_unbundle(remote, force, revs)
1361 1361 return self.push_addchangegroup(remote, force, revs)
1362 1362
1363 1363 def prepush(self, remote, force, revs):
1364 1364 base = {}
1365 1365 remote_heads = remote.heads()
1366 1366 inc = self.findincoming(remote, base, remote_heads, force=force)
1367 1367
1368 1368 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1369 1369 if revs is not None:
1370 1370 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1371 1371 else:
1372 1372 bases, heads = update, self.changelog.heads()
1373 1373
1374 1374 if not bases:
1375 1375 self.ui.status(_("no changes found\n"))
1376 1376 return None, 1
1377 1377 elif not force:
1378 1378 # check if we're creating new remote heads
1379 1379 # to be a remote head after push, node must be either
1380 1380 # - unknown locally
1381 1381 # - a local outgoing head descended from update
1382 1382 # - a remote head that's known locally and not
1383 1383 # ancestral to an outgoing head
1384 1384
1385 1385 warn = 0
1386 1386
1387 1387 if remote_heads == [nullid]:
1388 1388 warn = 0
1389 1389 elif not revs and len(heads) > len(remote_heads):
1390 1390 warn = 1
1391 1391 else:
1392 1392 newheads = list(heads)
1393 1393 for r in remote_heads:
1394 1394 if r in self.changelog.nodemap:
1395 desc = self.changelog.heads(r)
1395 desc = self.changelog.heads(r, heads)
1396 1396 l = [h for h in heads if h in desc]
1397 1397 if not l:
1398 1398 newheads.append(r)
1399 1399 else:
1400 1400 newheads.append(r)
1401 1401 if len(newheads) > len(remote_heads):
1402 1402 warn = 1
1403 1403
1404 1404 if warn:
1405 1405 self.ui.warn(_("abort: push creates new remote branches!\n"))
1406 1406 self.ui.status(_("(did you forget to merge?"
1407 1407 " use push -f to force)\n"))
1408 1408 return None, 1
1409 1409 elif inc:
1410 1410 self.ui.warn(_("note: unsynced remote changes!\n"))
1411 1411
1412 1412
1413 1413 if revs is None:
1414 1414 cg = self.changegroup(update, 'push')
1415 1415 else:
1416 1416 cg = self.changegroupsubset(update, revs, 'push')
1417 1417 return cg, remote_heads
1418 1418
1419 1419 def push_addchangegroup(self, remote, force, revs):
1420 1420 lock = remote.lock()
1421 1421
1422 1422 ret = self.prepush(remote, force, revs)
1423 1423 if ret[0] is not None:
1424 1424 cg, remote_heads = ret
1425 1425 return remote.addchangegroup(cg, 'push', self.url())
1426 1426 return ret[1]
1427 1427
1428 1428 def push_unbundle(self, remote, force, revs):
1429 1429 # local repo finds heads on server, finds out what revs it
1430 1430 # must push. once revs transferred, if server finds it has
1431 1431 # different heads (someone else won commit/push race), server
1432 1432 # aborts.
1433 1433
1434 1434 ret = self.prepush(remote, force, revs)
1435 1435 if ret[0] is not None:
1436 1436 cg, remote_heads = ret
1437 1437 if force: remote_heads = ['force']
1438 1438 return remote.unbundle(cg, remote_heads, 'push')
1439 1439 return ret[1]
1440 1440
1441 1441 def changegroupinfo(self, nodes):
1442 1442 self.ui.note(_("%d changesets found\n") % len(nodes))
1443 1443 if self.ui.debugflag:
1444 1444 self.ui.debug(_("List of changesets:\n"))
1445 1445 for node in nodes:
1446 1446 self.ui.debug("%s\n" % hex(node))
1447 1447
1448 1448 def changegroupsubset(self, bases, heads, source):
1449 1449 """This function generates a changegroup consisting of all the nodes
1450 1450         that are descendants of any of the bases, and ancestors of any of
1451 1451 the heads.
1452 1452
1453 1453 It is fairly complex as determining which filenodes and which
1454 1454 manifest nodes need to be included for the changeset to be complete
1455 1455 is non-trivial.
1456 1456
1457 1457 Another wrinkle is doing the reverse, figuring out which changeset in
1458 1458 the changegroup a particular filenode or manifestnode belongs to."""
1459 1459
1460 1460 self.hook('preoutgoing', throw=True, source=source)
1461 1461
1462 1462 # Set up some initial variables
1463 1463 # Make it easy to refer to self.changelog
1464 1464 cl = self.changelog
1465 1465 # msng is short for missing - compute the list of changesets in this
1466 1466 # changegroup.
1467 1467 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1468 1468 self.changegroupinfo(msng_cl_lst)
1469 1469 # Some bases may turn out to be superfluous, and some heads may be
1470 1470 # too. nodesbetween will return the minimal set of bases and heads
1471 1471 # necessary to re-create the changegroup.
1472 1472
1473 1473 # Known heads are the list of heads that it is assumed the recipient
1474 1474 # of this changegroup will know about.
1475 1475 knownheads = {}
1476 1476 # We assume that all parents of bases are known heads.
1477 1477 for n in bases:
1478 1478 for p in cl.parents(n):
1479 1479 if p != nullid:
1480 1480 knownheads[p] = 1
1481 1481 knownheads = knownheads.keys()
1482 1482 if knownheads:
1483 1483 # Now that we know what heads are known, we can compute which
1484 1484 # changesets are known. The recipient must know about all
1485 1485 # changesets required to reach the known heads from the null
1486 1486 # changeset.
1487 1487 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1488 1488 junk = None
1489 1489 # Transform the list into an ersatz set.
1490 1490 has_cl_set = dict.fromkeys(has_cl_set)
1491 1491 else:
1492 1492 # If there were no known heads, the recipient cannot be assumed to
1493 1493 # know about any changesets.
1494 1494 has_cl_set = {}
1495 1495
1496 1496 # Make it easy to refer to self.manifest
1497 1497 mnfst = self.manifest
1498 1498 # We don't know which manifests are missing yet
1499 1499 msng_mnfst_set = {}
1500 1500 # Nor do we know which filenodes are missing.
1501 1501 msng_filenode_set = {}
1502 1502
1503 1503 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1504 1504 junk = None
1505 1505
1506 1506 # A changeset always belongs to itself, so the changenode lookup
1507 1507 # function for a changenode is identity.
1508 1508 def identity(x):
1509 1509 return x
1510 1510
1511 1511 # A function generating function. Sets up an environment for the
1512 1512 # inner function.
1513 1513 def cmp_by_rev_func(revlog):
1514 1514 # Compare two nodes by their revision number in the environment's
1515 1515 # revision history. Since the revision number both represents the
1516 1516 # most efficient order to read the nodes in, and represents a
1517 1517 # topological sorting of the nodes, this function is often useful.
1518 1518 def cmp_by_rev(a, b):
1519 1519 return cmp(revlog.rev(a), revlog.rev(b))
1520 1520 return cmp_by_rev
1521 1521
1522 1522 # If we determine that a particular file or manifest node must be a
1523 1523 # node that the recipient of the changegroup will already have, we can
1524 1524 # also assume the recipient will have all the parents. This function
1525 1525 # prunes them from the set of missing nodes.
1526 1526 def prune_parents(revlog, hasset, msngset):
1527 1527 haslst = hasset.keys()
1528 1528 haslst.sort(cmp_by_rev_func(revlog))
1529 1529 for node in haslst:
1530 1530 parentlst = [p for p in revlog.parents(node) if p != nullid]
1531 1531 while parentlst:
1532 1532 n = parentlst.pop()
1533 1533 if n not in hasset:
1534 1534 hasset[n] = 1
1535 1535 p = [p for p in revlog.parents(n) if p != nullid]
1536 1536 parentlst.extend(p)
1537 1537 for n in hasset:
1538 1538 msngset.pop(n, None)
1539 1539
1540 1540 # This is a function generating function used to set up an environment
1541 1541 # for the inner function to execute in.
1542 1542 def manifest_and_file_collector(changedfileset):
1543 1543 # This is an information gathering function that gathers
1544 1544 # information from each changeset node that goes out as part of
1545 1545 # the changegroup. The information gathered is a list of which
1546 1546 # manifest nodes are potentially required (the recipient may
1547 1547             # already have them) and the total list of all files which were
1548 1548 # changed in any changeset in the changegroup.
1549 1549 #
1550 1550             # We also remember, for each manifest, the first changenode we
1551 1551             # saw that referenced it, so we can later determine which
1552 1552             # changenode 'owns' the manifest.
1553 1553 def collect_manifests_and_files(clnode):
1554 1554 c = cl.read(clnode)
1555 1555 for f in c[3]:
1556 1556 # This is to make sure we only have one instance of each
1557 1557 # filename string for each filename.
1558 1558 changedfileset.setdefault(f, f)
1559 1559 msng_mnfst_set.setdefault(c[0], clnode)
1560 1560 return collect_manifests_and_files
1561 1561
1562 1562 # Figure out which manifest nodes (of the ones we think might be part
1563 1563 # of the changegroup) the recipient must know about and remove them
1564 1564 # from the changegroup.
1565 1565 def prune_manifests():
1566 1566 has_mnfst_set = {}
1567 1567 for n in msng_mnfst_set:
1568 1568 # If a 'missing' manifest thinks it belongs to a changenode
1569 1569 # the recipient is assumed to have, obviously the recipient
1570 1570 # must have that manifest.
1571 1571 linknode = cl.node(mnfst.linkrev(n))
1572 1572 if linknode in has_cl_set:
1573 1573 has_mnfst_set[n] = 1
1574 1574 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1575 1575
1576 1576 # Use the information collected in collect_manifests_and_files to say
1577 1577 # which changenode any manifestnode belongs to.
1578 1578 def lookup_manifest_link(mnfstnode):
1579 1579 return msng_mnfst_set[mnfstnode]
1580 1580
1581 1581 # A function generating function that sets up the initial environment
1582 1582 # the inner function.
1583 1583         # for the inner function.
1584 1584 next_rev = [0]
1585 1585 # This gathers information from each manifestnode included in the
1586 1586 # changegroup about which filenodes the manifest node references
1587 1587 # so we can include those in the changegroup too.
1588 1588 #
1589 1589 # It also remembers which changenode each filenode belongs to. It
1590 1590             # does this by assuming a filenode belongs to the changenode that
1591 1591             # the first manifest referencing it belongs to.
1592 1592 def collect_msng_filenodes(mnfstnode):
1593 1593 r = mnfst.rev(mnfstnode)
1594 1594 if r == next_rev[0]:
1595 1595 # If the last rev we looked at was the one just previous,
1596 1596 # we only need to see a diff.
1597 1597 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1598 1598 # For each line in the delta
1599 1599 for dline in delta.splitlines():
1600 1600 # get the filename and filenode for that line
1601 1601 f, fnode = dline.split('\0')
1602 1602 fnode = bin(fnode[:40])
1603 1603 f = changedfiles.get(f, None)
1604 1604 # And if the file is in the list of files we care
1605 1605 # about.
1606 1606 if f is not None:
1607 1607 # Get the changenode this manifest belongs to
1608 1608 clnode = msng_mnfst_set[mnfstnode]
1609 1609 # Create the set of filenodes for the file if
1610 1610 # there isn't one already.
1611 1611 ndset = msng_filenode_set.setdefault(f, {})
1612 1612 # And set the filenode's changelog node to the
1613 1613 # manifest's if it hasn't been set already.
1614 1614 ndset.setdefault(fnode, clnode)
1615 1615 else:
1616 1616 # Otherwise we need a full manifest.
1617 1617 m = mnfst.read(mnfstnode)
1618 1618 # For every file we care about.
1619 1619 for f in changedfiles:
1620 1620 fnode = m.get(f, None)
1621 1621 # If it's in the manifest
1622 1622 if fnode is not None:
1623 1623 # See comments above.
1624 1624 clnode = msng_mnfst_set[mnfstnode]
1625 1625 ndset = msng_filenode_set.setdefault(f, {})
1626 1626 ndset.setdefault(fnode, clnode)
1627 1627 # Remember the revision we hope to see next.
1628 1628 next_rev[0] = r + 1
1629 1629 return collect_msng_filenodes
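# A sketch of the manifest delta lines parsed above (hypothetical
# content, not from the original source): each line of a manifest
# delta is "<filename>\0<40 hex chars of filenode>", so e.g.
#
#   >>> dline = "src/foo.c\x00" + "ab" * 20
#   >>> f, fnode = dline.split('\0')
#   >>> len(bin(fnode[:40]))
#   20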
1630 1630
1631 1631 # We have a list of filenodes we think we need for a file, let's remove
1632 1632 # all those we know the recipient must have.
1633 1633 def prune_filenodes(f, filerevlog):
1634 1634 msngset = msng_filenode_set[f]
1635 1635 hasset = {}
1636 1636 # If a 'missing' filenode thinks it belongs to a changenode we
1637 1637 # assume the recipient must have, then the recipient must have
1638 1638 # that filenode.
1639 1639 for n in msngset:
1640 1640 clnode = cl.node(filerevlog.linkrev(n))
1641 1641 if clnode in has_cl_set:
1642 1642 hasset[n] = 1
1643 1643 prune_parents(filerevlog, hasset, msngset)
1644 1644
1645 1645 # A function generating function that sets up a context for the
1646 1646 # inner function.
1647 1647 def lookup_filenode_link_func(fname):
1648 1648 msngset = msng_filenode_set[fname]
1649 1649 # Lookup the changenode the filenode belongs to.
1650 1650 def lookup_filenode_link(fnode):
1651 1651 return msngset[fnode]
1652 1652 return lookup_filenode_link
1653 1653
1654 1654 # Now that we have all these utility functions to help out and
1655 1655 # logically divide up the task, generate the group.
1656 1656 def gengroup():
1657 1657 # The set of changed files starts empty.
1658 1658 changedfiles = {}
1659 1659 # Create a changenode group generator that will call our functions
1660 1660 # back to lookup the owning changenode and collect information.
1661 1661 group = cl.group(msng_cl_lst, identity,
1662 1662 manifest_and_file_collector(changedfiles))
1663 1663 for chnk in group:
1664 1664 yield chnk
1665 1665
1666 1666 # The list of manifests has been collected by the generator
1667 1667 # calling our functions back.
1668 1668 prune_manifests()
1669 1669 msng_mnfst_lst = msng_mnfst_set.keys()
1670 1670 # Sort the manifestnodes by revision number.
1671 1671 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1672 1672 # Create a generator for the manifestnodes that calls our lookup
1673 1673 # and data collection functions back.
1674 1674 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1675 1675 filenode_collector(changedfiles))
1676 1676 for chnk in group:
1677 1677 yield chnk
1678 1678
1679 1679 # These are no longer needed, dereference and toss the memory for
1680 1680 # them.
1681 1681 msng_mnfst_lst = None
1682 1682 msng_mnfst_set.clear()
1683 1683
1684 1684 changedfiles = changedfiles.keys()
1685 1685 changedfiles.sort()
1686 1686 # Go through all our files in order sorted by name.
1687 1687 for fname in changedfiles:
1688 1688 filerevlog = self.file(fname)
1689 1689 # Toss out the filenodes that the recipient isn't really
1690 1690 # missing.
1691 1691 if msng_filenode_set.has_key(fname):
1692 1692 prune_filenodes(fname, filerevlog)
1693 1693 msng_filenode_lst = msng_filenode_set[fname].keys()
1694 1694 else:
1695 1695 msng_filenode_lst = []
1696 1696 # If any filenodes are left, generate the group for them,
1697 1697 # otherwise don't bother.
1698 1698 if len(msng_filenode_lst) > 0:
1699 1699 yield changegroup.genchunk(fname)
1700 1700 # Sort the filenodes by their revision #
1701 1701 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1702 1702 # Create a group generator and only pass in a changenode
1703 1703 # lookup function, as we don't need to collect any information
1704 1704 # from filenodes.
1705 1705 group = filerevlog.group(msng_filenode_lst,
1706 1706 lookup_filenode_link_func(fname))
1707 1707 for chnk in group:
1708 1708 yield chnk
1709 1709 if msng_filenode_set.has_key(fname):
1710 1710 # Don't need this anymore, toss it to free memory.
1711 1711 del msng_filenode_set[fname]
1712 1712 # Signal that no more groups are left.
1713 1713 yield changegroup.closechunk()
1714 1714
1715 1715 if msng_cl_lst:
1716 1716 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1717 1717
1718 1718 return util.chunkbuffer(gengroup())
1719 1719
1720 1720 def changegroup(self, basenodes, source):
1721 1721 """Generate a changegroup of all nodes that we have that a recipient
1722 1722 doesn't.
1723 1723
1724 1724 This is much easier than the previous function as we can assume that
1725 1725 the recipient has any changenode we aren't sending them."""
1726 1726
1727 1727 self.hook('preoutgoing', throw=True, source=source)
1728 1728
1729 1729 cl = self.changelog
1730 1730 nodes = cl.nodesbetween(basenodes, None)[0]
1731 1731 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1732 1732 self.changegroupinfo(nodes)
1733 1733
1734 1734 def identity(x):
1735 1735 return x
1736 1736
1737 1737 def gennodelst(revlog):
1738 1738 for r in xrange(0, revlog.count()):
1739 1739 n = revlog.node(r)
1740 1740 if revlog.linkrev(n) in revset:
1741 1741 yield n
1742 1742
1743 1743 def changed_file_collector(changedfileset):
1744 1744 def collect_changed_files(clnode):
1745 1745 c = cl.read(clnode)
1746 1746 for fname in c[3]:
1747 1747 changedfileset[fname] = 1
1748 1748 return collect_changed_files
1749 1749
1750 1750 def lookuprevlink_func(revlog):
1751 1751 def lookuprevlink(n):
1752 1752 return cl.node(revlog.linkrev(n))
1753 1753 return lookuprevlink
1754 1754
1755 1755 def gengroup():
1756 1756 # construct a list of all changed files
1757 1757 changedfiles = {}
1758 1758
1759 1759 for chnk in cl.group(nodes, identity,
1760 1760 changed_file_collector(changedfiles)):
1761 1761 yield chnk
1762 1762 changedfiles = changedfiles.keys()
1763 1763 changedfiles.sort()
1764 1764
1765 1765 mnfst = self.manifest
1766 1766 nodeiter = gennodelst(mnfst)
1767 1767 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1768 1768 yield chnk
1769 1769
1770 1770 for fname in changedfiles:
1771 1771 filerevlog = self.file(fname)
1772 1772 nodeiter = gennodelst(filerevlog)
1773 1773 nodeiter = list(nodeiter)
1774 1774 if nodeiter:
1775 1775 yield changegroup.genchunk(fname)
1776 1776 lookup = lookuprevlink_func(filerevlog)
1777 1777 for chnk in filerevlog.group(nodeiter, lookup):
1778 1778 yield chnk
1779 1779
1780 1780 yield changegroup.closechunk()
1781 1781
1782 1782 if nodes:
1783 1783 self.hook('outgoing', node=hex(nodes[0]), source=source)
1784 1784
1785 1785 return util.chunkbuffer(gengroup())
1786 1786
1787 1787 def addchangegroup(self, source, srctype, url):
1788 1788 """add changegroup to repo.
1789 1789
1790 1790 return values:
1791 1791 - nothing changed or no source: 0
1792 1792 - more heads than before: 1+added heads (2..n)
1793 1793 - fewer heads than before: -1-removed heads (-2..-n)
1794 1794 - number of heads stays the same: 1
1795 1795 """
1796 1796 def csmap(x):
1797 1797 self.ui.debug(_("add changeset %s\n") % short(x))
1798 1798 return cl.count()
1799 1799
1800 1800 def revmap(x):
1801 1801 return cl.rev(x)
1802 1802
1803 1803 if not source:
1804 1804 return 0
1805 1805
1806 1806 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1807 1807
1808 1808 changesets = files = revisions = 0
1809 1809
1810 1810 tr = self.transaction()
1811 1811
1812 1812 # write changelog data to temp files so concurrent readers will not see
1813 1813 # an inconsistent view
1814 1814 cl = None
1815 1815 try:
1816 1816 cl = appendfile.appendchangelog(self.sopener,
1817 1817 self.changelog.version)
1818 1818
1819 1819 oldheads = len(cl.heads())
1820 1820
1821 1821 # pull off the changeset group
1822 1822 self.ui.status(_("adding changesets\n"))
1823 1823 cor = cl.count() - 1
1824 1824 chunkiter = changegroup.chunkiter(source)
1825 1825 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1826 1826 raise util.Abort(_("received changelog group is empty"))
1827 1827 cnr = cl.count() - 1
1828 1828 changesets = cnr - cor
1829 1829
1830 1830 # pull off the manifest group
1831 1831 self.ui.status(_("adding manifests\n"))
1832 1832 chunkiter = changegroup.chunkiter(source)
1833 1833 # no need to check for empty manifest group here:
1834 1834 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1835 1835 # no new manifest will be created and the manifest group will
1836 1836 # be empty during the pull
1837 1837 self.manifest.addgroup(chunkiter, revmap, tr)
1838 1838
1839 1839 # process the files
1840 1840 self.ui.status(_("adding file changes\n"))
1841 1841 while 1:
1842 1842 f = changegroup.getchunk(source)
1843 1843 if not f:
1844 1844 break
1845 1845 self.ui.debug(_("adding %s revisions\n") % f)
1846 1846 fl = self.file(f)
1847 1847 o = fl.count()
1848 1848 chunkiter = changegroup.chunkiter(source)
1849 1849 if fl.addgroup(chunkiter, revmap, tr) is None:
1850 1850 raise util.Abort(_("received file revlog group is empty"))
1851 1851 revisions += fl.count() - o
1852 1852 files += 1
1853 1853
1854 1854 cl.writedata()
1855 1855 finally:
1856 1856 if cl:
1857 1857 cl.cleanup()
1858 1858
1859 1859 # make changelog see real files again
1860 1860 self.changelog = changelog.changelog(self.sopener,
1861 1861 self.changelog.version)
1862 1862 self.changelog.checkinlinesize(tr)
1863 1863
1864 1864 newheads = len(self.changelog.heads())
1865 1865 heads = ""
1866 1866 if oldheads and newheads != oldheads:
1867 1867 heads = _(" (%+d heads)") % (newheads - oldheads)
1868 1868
1869 1869 self.ui.status(_("added %d changesets"
1870 1870 " with %d changes to %d files%s\n")
1871 1871 % (changesets, revisions, files, heads))
1872 1872
1873 1873 if changesets > 0:
1874 1874 self.hook('pretxnchangegroup', throw=True,
1875 1875 node=hex(self.changelog.node(cor+1)), source=srctype,
1876 1876 url=url)
1877 1877
1878 1878 tr.close()
1879 1879
1880 1880 if changesets > 0:
1881 1881 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1882 1882 source=srctype, url=url)
1883 1883
1884 1884 for i in xrange(cor + 1, cnr + 1):
1885 1885 self.hook("incoming", node=hex(self.changelog.node(i)),
1886 1886 source=srctype, url=url)
1887 1887
1888 1888 # never return 0 here:
1889 1889 if newheads < oldheads:
1890 1890 return newheads - oldheads - 1
1891 1891 else:
1892 1892 return newheads - oldheads + 1
1893 1893
1894 1894
1895 1895 def stream_in(self, remote):
1896 1896 fp = remote.stream_out()
1897 1897 l = fp.readline()
1898 1898 try:
1899 1899 resp = int(l)
1900 1900 except ValueError:
1901 1901 raise util.UnexpectedOutput(
1902 1902 _('Unexpected response from remote server:'), l)
1903 1903 if resp == 1:
1904 1904 raise util.Abort(_('operation forbidden by server'))
1905 1905 elif resp == 2:
1906 1906 raise util.Abort(_('locking the remote repository failed'))
1907 1907 elif resp != 0:
1908 1908 raise util.Abort(_('the server sent an unknown error code'))
1909 1909 self.ui.status(_('streaming all changes\n'))
1910 1910 l = fp.readline()
1911 1911 try:
1912 1912 total_files, total_bytes = map(int, l.split(' ', 1))
1913 1913 except (ValueError, TypeError):
1914 1914 raise util.UnexpectedOutput(
1915 1915 _('Unexpected response from remote server:'), l)
1916 1916 self.ui.status(_('%d files to transfer, %s of data\n') %
1917 1917 (total_files, util.bytecount(total_bytes)))
1918 1918 start = time.time()
1919 1919 for i in xrange(total_files):
1920 1920 # XXX doesn't support '\n' or '\r' in filenames
1921 1921 l = fp.readline()
1922 1922 try:
1923 1923 name, size = l.split('\0', 1)
1924 1924 size = int(size)
1925 1925 except (ValueError, TypeError):
1926 1926 raise util.UnexpectedOutput(
1927 1927 _('Unexpected response from remote server:'), l)
1928 1928 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1929 1929 ofp = self.sopener(name, 'w')
1930 1930 for chunk in util.filechunkiter(fp, limit=size):
1931 1931 ofp.write(chunk)
1932 1932 ofp.close()
1933 1933 elapsed = time.time() - start
1934 1934 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1935 1935 (util.bytecount(total_bytes), elapsed,
1936 1936 util.bytecount(total_bytes / elapsed)))
1937 1937 self.reload()
1938 1938 return len(self.heads()) + 1
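# A sketch of the stream_out wire format consumed above (file names
# and sizes hypothetical):
#
#   0\n                         status code: OK
#   2 8192\n                    total files, total bytes
#   data/foo.i\x006144\n        "<name>\0<size>", then 6144 raw bytes
#   00changelog.i\x002048\n     "<name>\0<size>", then 2048 raw bytes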
1939 1939
1940 1940 def clone(self, remote, heads=[], stream=False):
1941 1941 '''clone remote repository.
1942 1942
1943 1943 keyword arguments:
1944 1944 heads: list of revs to clone (forces use of pull)
1945 1945 stream: use streaming clone if possible'''
1946 1946
1947 1947 # now, all clients that can request uncompressed clones can
1948 1948 # read repo formats supported by all servers that can serve
1949 1949 # them.
1950 1950
1951 1951 # if revlog format changes, client will have to check version
1952 1952 # and format flags on "stream" capability, and use
1953 1953 # uncompressed only if compatible.
1954 1954
1955 1955 if stream and not heads and remote.capable('stream'):
1956 1956 return self.stream_in(remote)
1957 1957 return self.pull(remote, heads)
1958 1958
1959 1959 # used to avoid circular references so destructors work
1960 1960 def aftertrans(files):
1961 1961 renamefiles = [tuple(t) for t in files]
1962 1962 def a():
1963 1963 for src, dest in renamefiles:
1964 1964 util.rename(src, dest)
1965 1965 return a
1966 1966
1967 1967 def instance(ui, path, create):
1968 1968 return localrepository(ui, util.drop_scheme('file', path), create)
1969 1969
1970 1970 def islocal(path):
1971 1971 return True
@@ -1,1286 +1,1292 @@
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "binascii changegroup errno ancestor mdiff os")
17 17 demandload(globals(), "sha struct util zlib")
18 18
19 19 # revlog version strings
20 20 REVLOGV0 = 0
21 21 REVLOGNG = 1
22 22
23 23 # revlog flags
24 24 REVLOGNGINLINEDATA = (1 << 16)
25 25 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
26 26
27 27 REVLOG_DEFAULT_FORMAT = REVLOGNG
28 28 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
29 29
30 30 def flagstr(flag):
31 31 if flag == "inline":
32 32 return REVLOGNGINLINEDATA
33 33 raise RevlogError(_("unknown revlog flag %s") % flag)
34 34
35 35 def hash(text, p1, p2):
36 36 """generate a hash from the given text and its parent hashes
37 37
38 38 This hash combines both the current file contents and its history
39 39 in a manner that makes it easy to distinguish nodes with the same
40 40 content in the revision graph.
41 41 """
42 42 l = [p1, p2]
43 43 l.sort()
44 44 s = sha.new(l[0])
45 45 s.update(l[1])
46 46 s.update(text)
47 47 return s.digest()
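# Property sketch (hypothetical 20-byte parent ids): because the
# parents are sorted before hashing, the node id is independent of
# parent order.
#
#   >>> p1, p2 = 'a' * 20, 'b' * 20
#   >>> hash("text", p1, p2) == hash("text", p2, p1)
#   True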
48 48
49 49 def compress(text):
50 50 """ generate a possibly-compressed representation of text """
51 51 if not text: return ("", text)
52 52 if len(text) < 44:
53 53 if text[0] == '\0': return ("", text)
54 54 return ('u', text)
55 55 bin = zlib.compress(text)
56 56 if len(bin) > len(text):
57 57 if text[0] == '\0': return ("", text)
58 58 return ('u', text)
59 59 return ("", bin)
60 60
61 61 def decompress(bin):
62 62 """ decompress the given input """
63 63 if not bin: return bin
64 64 t = bin[0]
65 65 if t == '\0': return bin
66 66 if t == 'x': return zlib.decompress(bin)
67 67 if t == 'u': return bin[1:]
68 68 raise RevlogError(_("unknown compression type %r") % t)
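# Round-trip sketch: compress() returns a (prefix, data) pair whose
# concatenation decompress() undoes, whichever branch was taken.
#
#   >>> hdr, data = compress("some revision text " * 4)
#   >>> decompress(hdr + data) == "some revision text " * 4
#   True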
69 69
70 70 indexformatv0 = ">4l20s20s20s"
71 71 v0shaoffset = 56
72 72 # index ng:
73 73 # 6 bytes: offset
74 74 # 2 bytes: flags
75 75 # 4 bytes: compressed length
76 76 # 4 bytes: uncompressed length
77 77 # 4 bytes: base rev
78 78 # 4 bytes: link rev
79 79 # 4 bytes: parent 1 rev
80 80 # 4 bytes: parent 2 rev
81 81 # 32 bytes: nodeid
82 82 indexformatng = ">Qiiiiii20s12x"
83 83 ngshaoffset = 32
84 84 versionformat = ">I"
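# Size check for the layout documented above: the revlogng entry packs
# to exactly 64 bytes (8 for offset plus flags, 6 x 4 for the int
# fields, 20 for the nodeid, 12 of padding).
#
#   >>> struct.calcsize(indexformatng)
#   64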
85 85
86 86 class lazyparser(object):
87 87 """
88 88 this class avoids the need to parse the entirety of large indices
89 89 """
90 90
91 91 # lazyparser is not safe to use on windows if win32 extensions not
92 92 # available. it keeps the file handle open, which makes it impossible
93 93 # to break hardlinks on local cloned repos.
94 94 safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
95 95 hasattr(util, 'win32api'))
96 96
97 97 def __init__(self, dataf, size, indexformat, shaoffset):
98 98 self.dataf = dataf
99 99 self.format = indexformat
100 100 self.s = struct.calcsize(indexformat)
101 101 self.indexformat = indexformat
102 102 self.datasize = size
103 103 self.l = size/self.s
104 104 self.index = [None] * self.l
105 105 self.map = {nullid: nullrev}
106 106 self.allmap = 0
107 107 self.all = 0
108 108 self.mapfind_count = 0
109 109 self.shaoffset = shaoffset
110 110
111 111 def loadmap(self):
112 112 """
113 113 during a commit, we need to make sure the rev being added is
114 114 not a duplicate. This requires loading the entire index,
115 115 which is fairly slow. loadmap can load up just the node map,
116 116 which takes much less time.
117 117 """
118 118 if self.allmap: return
119 119 end = self.datasize
120 120 self.allmap = 1
121 121 cur = 0
122 122 count = 0
123 123 blocksize = self.s * 256
124 124 self.dataf.seek(0)
125 125 while cur < end:
126 126 data = self.dataf.read(blocksize)
127 127 off = 0
128 128 for x in xrange(256):
129 129 n = data[off + self.shaoffset:off + self.shaoffset + 20]
130 130 self.map[n] = count
131 131 count += 1
132 132 if count >= self.l:
133 133 break
134 134 off += self.s
135 135 cur += blocksize
136 136
137 137 def loadblock(self, blockstart, blocksize, data=None):
138 138 if self.all: return
139 139 if data is None:
140 140 self.dataf.seek(blockstart)
141 141 if blockstart + blocksize > self.datasize:
142 142 # the revlog may have grown since we've started running,
143 143 # but we don't have space in self.index for more entries.
144 144 # limit blocksize so that we don't get too much data.
145 145 blocksize = max(self.datasize - blockstart, 0)
146 146 data = self.dataf.read(blocksize)
147 147 lend = len(data) / self.s
148 148 i = blockstart / self.s
149 149 off = 0
150 150 for x in xrange(lend):
151 151 if self.index[i + x] == None:
152 152 b = data[off : off + self.s]
153 153 self.index[i + x] = b
154 154 n = b[self.shaoffset:self.shaoffset + 20]
155 155 self.map[n] = i + x
156 156 off += self.s
157 157
158 158 def findnode(self, node):
159 159 """search backwards through the index file for a specific node"""
160 160 if self.allmap: return None
161 161
162 162 # hg log will cause many many searches for the manifest
163 163 # nodes. After we get called a few times, just load the whole
164 164 # thing.
165 165 if self.mapfind_count > 8:
166 166 self.loadmap()
167 167 if node in self.map:
168 168 return node
169 169 return None
170 170 self.mapfind_count += 1
171 171 last = self.l - 1
172 172 while self.index[last] != None:
173 173 if last == 0:
174 174 self.all = 1
175 175 self.allmap = 1
176 176 return None
177 177 last -= 1
178 178 end = (last + 1) * self.s
179 179 blocksize = self.s * 256
180 180 while end >= 0:
181 181 start = max(end - blocksize, 0)
182 182 self.dataf.seek(start)
183 183 data = self.dataf.read(end - start)
184 184 findend = end - start
185 185 while True:
186 186 # we're searching backwards, so we have to make sure
187 187 # we don't find a changeset where this node is a parent
188 188 off = data.rfind(node, 0, findend)
189 189 findend = off
190 190 if off >= 0:
191 191 i = off / self.s
192 192 off = i * self.s
193 193 n = data[off + self.shaoffset:off + self.shaoffset + 20]
194 194 if n == node:
195 195 self.map[n] = i + start / self.s
196 196 return node
197 197 else:
198 198 break
199 199 end -= blocksize
200 200 return None
201 201
202 202 def loadindex(self, i=None, end=None):
203 203 if self.all: return
204 204 all = False
205 205 if i == None:
206 206 blockstart = 0
207 207 blocksize = (512 / self.s) * self.s
208 208 end = self.datasize
209 209 all = True
210 210 else:
211 211 if end:
212 212 blockstart = i * self.s
213 213 end = end * self.s
214 214 blocksize = end - blockstart
215 215 else:
216 216 blockstart = (i & ~(32)) * self.s
217 217 blocksize = self.s * 64
218 218 end = blockstart + blocksize
219 219 while blockstart < end:
220 220 self.loadblock(blockstart, blocksize)
221 221 blockstart += blocksize
222 222 if all: self.all = True
223 223
224 224 class lazyindex(object):
225 225 """a lazy version of the index array"""
226 226 def __init__(self, parser):
227 227 self.p = parser
228 228 def __len__(self):
229 229 return len(self.p.index)
230 230 def load(self, pos):
231 231 if pos < 0:
232 232 pos += len(self.p.index)
233 233 self.p.loadindex(pos)
234 234 return self.p.index[pos]
235 235 def __getitem__(self, pos):
236 236 ret = self.p.index[pos] or self.load(pos)
237 237 if isinstance(ret, str):
238 238 ret = struct.unpack(self.p.indexformat, ret)
239 239 return ret
240 240 def __setitem__(self, pos, item):
241 241 self.p.index[pos] = item
242 242 def __delitem__(self, pos):
243 243 del self.p.index[pos]
244 244 def append(self, e):
245 245 self.p.index.append(e)
246 246
247 247 class lazymap(object):
248 248 """a lazy version of the node map"""
249 249 def __init__(self, parser):
250 250 self.p = parser
251 251 def load(self, key):
252 252 n = self.p.findnode(key)
253 253 if n == None:
254 254 raise KeyError(key)
255 255 def __contains__(self, key):
256 256 if key in self.p.map:
257 257 return True
258 258 self.p.loadmap()
259 259 return key in self.p.map
260 260 def __iter__(self):
261 261 yield nullid
262 262 for i in xrange(self.p.l):
263 263 ret = self.p.index[i]
264 264 if not ret:
265 265 self.p.loadindex(i)
266 266 ret = self.p.index[i]
267 267 if isinstance(ret, str):
268 268 ret = struct.unpack(self.p.indexformat, ret)
269 269 yield ret[-1]
270 270 def __getitem__(self, key):
271 271 try:
272 272 return self.p.map[key]
273 273 except KeyError:
274 274 try:
275 275 self.load(key)
276 276 return self.p.map[key]
277 277 except KeyError:
278 278 raise KeyError("node " + hex(key))
279 279 def __setitem__(self, key, val):
280 280 self.p.map[key] = val
281 281 def __delitem__(self, key):
282 282 del self.p.map[key]
283 283
284 284 class RevlogError(Exception): pass
285 285
286 286 class revlog(object):
287 287 """
288 288 the underlying revision storage object
289 289
290 290 A revlog consists of two parts, an index and the revision data.
291 291
292 292 The index is a file with a fixed record size containing
293 293 information on each revision, including its nodeid (hash), the
294 294 nodeids of its parents, the position and offset of its data within
295 295 the data file, and the revision it's based on. Finally, each entry
296 296 contains a linkrev entry that can serve as a pointer to external
297 297 data.
298 298
299 299 The revision data itself is a linear collection of data chunks.
300 300 Each chunk represents a revision and is usually represented as a
301 301 delta against the previous chunk. To bound lookup time, runs of
302 302 deltas are limited to about 2 times the length of the original
303 303 version data. This makes retrieval of a version proportional to
304 304 its size, or O(1) relative to the number of revisions.
305 305
306 306 Both pieces of the revlog are written to in an append-only
307 307 fashion, which means we never need to rewrite a file to insert or
308 308 remove data, and can use some simple techniques to avoid the need
309 309 for locking while reading.
310 310 """
311 311 def __init__(self, opener, indexfile, datafile,
312 312 defversion=REVLOG_DEFAULT_VERSION):
313 313 """
314 314 create a revlog object
315 315
316 316 opener is a function that abstracts the file opening operation
317 317 and can be used to implement COW semantics or the like.
318 318 """
319 319 self.indexfile = indexfile
320 320 self.datafile = datafile
321 321 self.opener = opener
322 322
323 323 self.indexstat = None
324 324 self.cache = None
325 325 self.chunkcache = None
326 326 self.defversion = defversion
327 327 self.load()
328 328
329 329 def load(self):
330 330 v = self.defversion
331 331 try:
332 332 f = self.opener(self.indexfile)
333 333 i = f.read(4)
334 334 f.seek(0)
335 335 except IOError, inst:
336 336 if inst.errno != errno.ENOENT:
337 337 raise
338 338 i = ""
339 339 else:
340 340 try:
341 341 st = util.fstat(f)
342 342 except AttributeError, inst:
343 343 st = None
344 344 else:
345 345 oldst = self.indexstat
346 346 if (oldst and st.st_dev == oldst.st_dev
347 347 and st.st_ino == oldst.st_ino
348 348 and st.st_mtime == oldst.st_mtime
349 349 and st.st_ctime == oldst.st_ctime):
350 350 return
351 351 self.indexstat = st
352 352 if len(i) > 0:
353 353 v = struct.unpack(versionformat, i)[0]
354 354 flags = v & ~0xFFFF
355 355 fmt = v & 0xFFFF
356 356 if fmt == REVLOGV0:
357 357 if flags:
358 358 raise RevlogError(_("index %s unknown flags %#04x for format v0")
359 359 % (self.indexfile, flags >> 16))
360 360 elif fmt == REVLOGNG:
361 361 if flags & ~REVLOGNGINLINEDATA:
362 362 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
363 363 % (self.indexfile, flags >> 16))
364 364 else:
365 365 raise RevlogError(_("index %s unknown format %d")
366 366 % (self.indexfile, fmt))
367 367 self.version = v
368 368 if v == REVLOGV0:
369 369 self.indexformat = indexformatv0
370 370 shaoffset = v0shaoffset
371 371 else:
372 372 self.indexformat = indexformatng
373 373 shaoffset = ngshaoffset
374 374
375 375 if i:
376 376 if (lazyparser.safe_to_use and not self.inlinedata() and
377 377 st and st.st_size > 10000):
378 378 # big index, let's parse it on demand
379 379 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
380 380 self.index = lazyindex(parser)
381 381 self.nodemap = lazymap(parser)
382 382 else:
383 383 self.parseindex(f, st)
384 384 if self.version != REVLOGV0:
385 385 e = list(self.index[0])
386 386 type = self.ngtype(e[0])
387 387 e[0] = self.offset_type(0, type)
388 388 self.index[0] = e
389 389 else:
390 390 self.nodemap = {nullid: nullrev}
391 391 self.index = []
392 392
393 393
394 394 def parseindex(self, fp, st):
395 395 s = struct.calcsize(self.indexformat)
396 396 self.index = []
397 397 self.nodemap = {nullid: nullrev}
398 398 inline = self.inlinedata()
399 399 n = 0
400 400 leftover = None
401 401 while True:
402 402 if st:
403 403 data = fp.read(65536)
404 404 else:
405 405 # hack for httprangereader, it doesn't do partial reads well
406 406 data = fp.read()
407 407 if not data:
408 408 break
409 409 if n == 0 and self.inlinedata():
410 410 # cache the first chunk
411 411 self.chunkcache = (0, data)
412 412 if leftover:
413 413 data = leftover + data
414 414 leftover = None
415 415 off = 0
416 416 l = len(data)
417 417 while off < l:
418 418 if l - off < s:
419 419 leftover = data[off:]
420 420 break
421 421 cur = data[off:off + s]
422 422 off += s
423 423 e = struct.unpack(self.indexformat, cur)
424 424 self.index.append(e)
425 425 self.nodemap[e[-1]] = n
426 426 n += 1
427 427 if inline:
428 428 off += e[1]
429 429 if off > l:
430 430 # some things don't seek well, just read it
431 431 fp.read(off - l)
432 432 if not st:
433 433 break
434 434
435 435
436 436 def ngoffset(self, q):
437 437 if q & 0xFFFF:
438 438 raise RevlogError(_('%s: incompatible revision flag %x') %
439 439 (self.indexfile, q))
440 440 return long(q >> 16)
441 441
442 442 def ngtype(self, q):
443 443 return int(q & 0xFFFF)
444 444
445 445 def offset_type(self, offset, type):
446 446 return long(long(offset) << 16 | type)
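# Packing sketch: the first index field carries the data-file offset in
# its high 48 bits and the type flags in the low 16, so for any in-range
# offset n, ngoffset(offset_type(n, 0)) == n and
# ngtype(offset_type(n, t)) == t.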
447 447
448 448 def loadindex(self, start, end):
449 449 """load a block of indexes all at once from the lazy parser"""
450 450 if isinstance(self.index, lazyindex):
451 451 self.index.p.loadindex(start, end)
452 452
453 453 def loadindexmap(self):
454 454 """loads both the map and the index from the lazy parser"""
455 455 if isinstance(self.index, lazyindex):
456 456 p = self.index.p
457 457 p.loadindex()
458 458 self.nodemap = p.map
459 459
460 460 def loadmap(self):
461 461 """loads the map from the lazy parser"""
462 462 if isinstance(self.nodemap, lazymap):
463 463 self.nodemap.p.loadmap()
464 464 self.nodemap = self.nodemap.p.map
465 465
466 466 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
467 467 def tip(self): return self.node(len(self.index) - 1)
468 468 def count(self): return len(self.index)
469 469 def node(self, rev):
470 470 return rev == nullrev and nullid or self.index[rev][-1]
471 471 def rev(self, node):
472 472 try:
473 473 return self.nodemap[node]
474 474 except KeyError:
475 475 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
476 476 def linkrev(self, node):
477 477 return (node == nullid) and nullrev or self.index[self.rev(node)][-4]
478 478 def parents(self, node):
479 479 if node == nullid: return (nullid, nullid)
480 480 r = self.rev(node)
481 481 d = self.index[r][-3:-1]
482 482 if self.version == REVLOGV0:
483 483 return d
484 484 return (self.node(d[0]), self.node(d[1]))
485 485 def parentrevs(self, rev):
486 486 if rev == nullrev:
487 487 return (nullrev, nullrev)
488 488 d = self.index[rev][-3:-1]
489 489 if self.version == REVLOGV0:
490 490 return (self.rev(d[0]), self.rev(d[1]))
491 491 return d
492 492 def start(self, rev):
493 493 if rev == nullrev:
494 494 return 0
495 495 if self.version != REVLOGV0:
496 496 return self.ngoffset(self.index[rev][0])
497 497 return self.index[rev][0]
498 498
499 499 def end(self, rev): return self.start(rev) + self.length(rev)
500 500
501 501 def size(self, rev):
502 502 """return the length of the uncompressed text for a given revision"""
503 503 if rev == nullrev:
504 504 return 0
505 505 l = -1
506 506 if self.version != REVLOGV0:
507 507 l = self.index[rev][2]
508 508 if l >= 0:
509 509 return l
510 510
511 511 t = self.revision(self.node(rev))
512 512 return len(t)
513 513
514 514 # alternate implementation. The advantage of this code is that it
515 515 # will be faster for a single revision. But, the results are not
516 516 # cached, so finding the size of every revision will be slower.
517 517 """
518 518 if self.cache and self.cache[1] == rev:
519 519 return len(self.cache[2])
520 520
521 521 base = self.base(rev)
522 522 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
523 523 base = self.cache[1]
524 524 text = self.cache[2]
525 525 else:
526 526 text = self.revision(self.node(base))
527 527
528 528 l = len(text)
529 529 for x in xrange(base + 1, rev + 1):
530 530 l = mdiff.patchedsize(l, self.chunk(x))
531 531 return l
532 532 """
533 533
534 534 def length(self, rev):
535 535 if rev == nullrev:
536 536 return 0
537 537 else:
538 538 return self.index[rev][1]
539 539 def base(self, rev):
540 540 if (rev == nullrev):
541 541 return nullrev
542 542 else:
543 543 return self.index[rev][-5]
544 544
545 545 def reachable(self, node, stop=None):
546 546 """return a hash of all nodes ancestral to a given node, including
547 547 the node itself, stopping when stop is matched"""
548 548 reachable = {}
549 549 visit = [node]
550 550 reachable[node] = 1
551 551 if stop:
552 552 stopn = self.rev(stop)
553 553 else:
554 554 stopn = 0
555 555 while visit:
556 556 n = visit.pop(0)
557 557 if n == stop:
558 558 continue
559 559 if n == nullid:
560 560 continue
561 561 for p in self.parents(n):
562 562 if self.rev(p) < stopn:
563 563 continue
564 564 if p not in reachable:
565 565 reachable[p] = 1
566 566 visit.append(p)
567 567 return reachable
568 568
569 569 def nodesbetween(self, roots=None, heads=None):
570 570 """Return a tuple containing three elements. Elements 1 and 2 contain
571 571 a final list of bases and heads after all the unreachable ones have been
572 572 pruned. Element 0 contains a topologically sorted list of all
573 573 nodes that satisfy these constraints:
574 574 
575 575 1. All nodes must be descended from a node in roots (the nodes on
576 576 roots are considered descended from themselves).
577 577 2. All nodes must also be ancestors of a node in heads (the nodes in
578 578 heads are considered to be their own ancestors).
579 579
580 580 If roots is unspecified, nullid is assumed as the only root.
581 581 If heads is unspecified, it is taken to be the output of the
582 582 heads method (i.e. a list of all nodes in the repository that
583 583 have no children)."""
584 584 nonodes = ([], [], [])
585 585 if roots is not None:
586 586 roots = list(roots)
587 587 if not roots:
588 588 return nonodes
589 589 lowestrev = min([self.rev(n) for n in roots])
590 590 else:
591 591 roots = [nullid] # Everybody's a descendent of nullid
592 592 lowestrev = nullrev
593 593 if (lowestrev == nullrev) and (heads is None):
594 594 # We want _all_ the nodes!
595 595 return ([self.node(r) for r in xrange(0, self.count())],
596 596 [nullid], list(self.heads()))
597 597 if heads is None:
598 598 # All nodes are ancestors, so the latest ancestor is the last
599 599 # node.
600 600 highestrev = self.count() - 1
601 601 # Set ancestors to None to signal that every node is an ancestor.
602 602 ancestors = None
603 603 # Set heads to an empty dictionary for later discovery of heads
604 604 heads = {}
605 605 else:
606 606 heads = list(heads)
607 607 if not heads:
608 608 return nonodes
609 609 ancestors = {}
610 610 # Turn heads into a dictionary so we can remove 'fake' heads.
611 611 # Also, later we will be using it to filter out the heads we can't
612 612 # find from roots.
613 613 heads = dict.fromkeys(heads, 0)
614 614 # Start at the top and keep marking parents until we're done.
615 615 nodestotag = heads.keys()
616 616 # Remember where the top was so we can use it as a limit later.
617 617 highestrev = max([self.rev(n) for n in nodestotag])
618 618 while nodestotag:
619 619 # grab a node to tag
620 620 n = nodestotag.pop()
621 621 # Never tag nullid
622 622 if n == nullid:
623 623 continue
624 624 # A node's revision number represents its place in a
625 625 # topologically sorted list of nodes.
626 626 r = self.rev(n)
627 627 if r >= lowestrev:
628 628 if n not in ancestors:
629 629 # If we are possibly a descendent of one of the roots
630 630 # and we haven't already been marked as an ancestor
631 631 ancestors[n] = 1 # Mark as ancestor
632 632 # Add non-nullid parents to list of nodes to tag.
633 633 nodestotag.extend([p for p in self.parents(n) if
634 634 p != nullid])
635 635 elif n in heads: # We've seen it before, is it a fake head?
636 636 # So it is, real heads should not be the ancestors of
637 637 # any other heads.
638 638 heads.pop(n)
639 639 if not ancestors:
640 640 return nonodes
641 641 # Now that we have our set of ancestors, we want to remove any
642 642 # roots that are not ancestors.
643 643
644 644 # If one of the roots was nullid, everything is included anyway.
645 645 if lowestrev > nullrev:
646 646 # But, since we weren't, let's recompute the lowest rev to not
647 647 # include roots that aren't ancestors.
648 648
649 649 # Filter out roots that aren't ancestors of heads
650 650 roots = [n for n in roots if n in ancestors]
651 651 # Recompute the lowest revision
652 652 if roots:
653 653 lowestrev = min([self.rev(n) for n in roots])
654 654 else:
655 655 # No more roots? Return empty list
656 656 return nonodes
657 657 else:
658 658 # We are descending from nullid, and don't need to care about
659 659 # any other roots.
660 660 lowestrev = nullrev
661 661 roots = [nullid]
662 662 # Transform our roots list into a 'set' (i.e. a dictionary where the
663 663 # values don't matter).
664 664 descendents = dict.fromkeys(roots, 1)
665 665 # Also, keep the original roots so we can filter out roots that aren't
666 666 # 'real' roots (i.e. are descended from other roots).
667 667 roots = descendents.copy()
668 668 # Our topologically sorted list of output nodes.
669 669 orderedout = []
670 670 # Don't start at nullid since we don't want nullid in our output list,
671 671 # and if nullid shows up in descedents, empty parents will look like
672 672 # they're descendents.
673 673 for r in xrange(max(lowestrev, 0), highestrev + 1):
674 674 n = self.node(r)
675 675 isdescendent = False
676 676 if lowestrev == nullrev: # Everybody is a descendent of nullid
677 677 isdescendent = True
678 678 elif n in descendents:
679 679 # n is already a descendent
680 680 isdescendent = True
681 681 # This check only needs to be done here because all the roots
682 682 # will start being marked as descendents before the loop.
683 683 if n in roots:
684 684 # If n was a root, check if it's a 'real' root.
685 685 p = tuple(self.parents(n))
686 686 # If any of its parents are descendents, it's not a root.
687 687 if (p[0] in descendents) or (p[1] in descendents):
688 688 roots.pop(n)
689 689 else:
690 690 p = tuple(self.parents(n))
691 691 # A node is a descendent if either of its parents are
692 692 # descendents. (We seeded the descendents list with the roots
693 693 # up there, remember?)
694 694 if (p[0] in descendents) or (p[1] in descendents):
695 695 descendents[n] = 1
696 696 isdescendent = True
697 697 if isdescendent and ((ancestors is None) or (n in ancestors)):
698 698 # Only include nodes that are both descendents and ancestors.
699 699 orderedout.append(n)
700 700 if (ancestors is not None) and (n in heads):
701 701 # We're trying to figure out which heads are reachable
702 702 # from roots.
703 703 # Mark this head as having been reached
704 704 heads[n] = 1
705 705 elif ancestors is None:
706 706 # Otherwise, we're trying to discover the heads.
707 707 # Assume this is a head because if it isn't, the next step
708 708 # will eventually remove it.
709 709 heads[n] = 1
710 710 # But, obviously its parents aren't.
711 711 for p in self.parents(n):
712 712 heads.pop(p, None)
713 713 heads = [n for n in heads.iterkeys() if heads[n] != 0]
714 714 roots = roots.keys()
715 715 assert orderedout
716 716 assert roots
717 717 assert heads
718 718 return (orderedout, roots, heads)
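# Usage sketch (hypothetical changelog cl where base is an ancestor of
# head): the returned nodes are sorted by revision number, so the root
# comes first and the head last.
#
#   >>> nodes, bases, heads = cl.nodesbetween([base], [head])
#   >>> nodes[0] == base, nodes[-1] == head
#   (True, True)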
719 719
720 def heads(self, start=None):
720 def heads(self, start=None, stop=None):
721 721 """return the list of all nodes that have no children
722 722
723 723 if start is specified, only heads that are descendants of
724 724 start will be returned
725
725 if stop is specified, all the revs in stop are treated as if
726 they had no children
726 727 """
727 728 if start is None:
728 729 start = nullid
730 if stop is None:
731 stop = []
732 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
729 733 startrev = self.rev(start)
730 734 reachable = {startrev: 1}
731 735 heads = {startrev: 1}
732 736
733 737 parentrevs = self.parentrevs
734 738 for r in xrange(startrev + 1, self.count()):
735 739 for p in parentrevs(r):
736 740 if p in reachable:
737 reachable[r] = 1
741 if r not in stoprevs:
742 reachable[r] = 1
738 743 heads[r] = 1
739 if p in heads:
744 if p in heads and p not in stoprevs:
740 745 del heads[p]
746
741 747 return [self.node(r) for r in heads]
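# Illustrative sketch of the new stop parameter (hypothetical revlog rl
# in which rev 1 has children): a rev listed in stop stays a head
# because its descendants are ignored.
#
#   >>> rl.node(1) in rl.heads(stop=[rl.node(1)])
#   True
#
# (Presumably this is what the push -r head calculation uses to avoid
# counting heads the remote will not actually receive.)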
742 748
743 749 def children(self, node):
744 750 """find the children of a given node"""
745 751 c = []
746 752 p = self.rev(node)
747 753 for r in range(p + 1, self.count()):
748 754 for pr in self.parentrevs(r):
749 755 if pr == p:
750 756 c.append(self.node(r))
751 757 return c
752 758
753 759 def _match(self, id):
754 760 if isinstance(id, (long, int)):
755 761 # rev
756 762 return self.node(id)
757 763 if len(id) == 20:
758 764 # possibly a binary node
759 765 # odds of a binary node being all hex in ASCII are 1 in 10**25
760 766 try:
761 767 node = id
762 768 r = self.rev(node) # quick search the index
763 769 return node
764 770 except RevlogError:
765 771 pass # may be partial hex id
766 772 try:
767 773 # str(rev)
768 774 rev = int(id)
769 775 if str(rev) != id: raise ValueError
770 776 if rev < 0: rev = self.count() + rev
771 777 if rev < 0 or rev >= self.count(): raise ValueError
772 778 return self.node(rev)
773 779 except (ValueError, OverflowError):
774 780 pass
775 781 if len(id) == 40:
776 782 try:
777 783 # a full hex nodeid?
778 784 node = bin(id)
779 785 r = self.rev(node)
780 786 return node
781 787 except TypeError:
782 788 pass
783 789
784 790 def _partialmatch(self, id):
785 791 if len(id) < 40:
786 792 try:
787 793 # hex(node)[:...]
788 794 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
789 795 node = None
790 796 for n in self.nodemap:
791 797 if n.startswith(bin_id) and hex(n).startswith(id):
792 798 if node is not None:
793 799 raise RevlogError(_("Ambiguous identifier"))
794 800 node = n
795 801 if node is not None:
796 802 return node
797 803 except TypeError:
798 804 pass
799 805
800 806 def lookup(self, id):
801 807 """locate a node based on:
802 808 - revision number or str(revision number)
803 809 - nodeid or subset of hex nodeid
804 810 """
805 811
806 812 n = self._match(id)
807 813 if n is not None:
808 814 return n
809 815 n = self._partialmatch(id)
810 816 if n:
811 817 return n
812 818
813 819 raise RevlogError(_("No match found"))
814 820
815 821 def cmp(self, node, text):
816 822 """compare text with a given file revision"""
817 823 p1, p2 = self.parents(node)
818 824 return hash(text, p1, p2) != node
819 825
820 826 def makenode(self, node, text):
821 827 """calculate a file nodeid for text, descended or possibly
822 828 unchanged from node"""
823 829
824 830 if self.cmp(node, text):
825 831 return hash(text, node, nullid)
826 832 return node
827 833
828 834 def diff(self, a, b):
829 835 """return a delta between two revisions"""
830 836 return mdiff.textdiff(a, b)
831 837
832 838 def patches(self, t, pl):
833 839 """apply a list of patches to a string"""
834 840 return mdiff.patches(t, pl)
835 841
836 842 def chunk(self, rev, df=None, cachelen=4096):
837 843 start, length = self.start(rev), self.length(rev)
838 844 inline = self.inlinedata()
839 845 if inline:
840 846 start += (rev + 1) * struct.calcsize(self.indexformat)
841 847 end = start + length
842 848 def loadcache(df):
843 849 cache_length = max(cachelen, length) # 4k
844 850 if not df:
845 851 if inline:
846 852 df = self.opener(self.indexfile)
847 853 else:
848 854 df = self.opener(self.datafile)
849 855 df.seek(start)
850 856 self.chunkcache = (start, df.read(cache_length))
851 857
852 858 if not self.chunkcache:
853 859 loadcache(df)
854 860
855 861 cache_start = self.chunkcache[0]
856 862 cache_end = cache_start + len(self.chunkcache[1])
857 863 if start >= cache_start and end <= cache_end:
858 864 # it is cached
859 865 offset = start - cache_start
860 866 else:
861 867 loadcache(df)
862 868 offset = 0
863 869
864 870 #def checkchunk():
865 871 # df = self.opener(self.datafile)
866 872 # df.seek(start)
867 873 # return df.read(length)
868 874 #assert s == checkchunk()
869 875 return decompress(self.chunkcache[1][offset:offset + length])
870 876
871 877 def delta(self, node):
872 878 """return or calculate a delta between a node and its predecessor"""
873 879 r = self.rev(node)
874 880 return self.revdiff(r - 1, r)
875 881
876 882 def revdiff(self, rev1, rev2):
877 883 """return or calculate a delta between two revisions"""
878 884 b1 = self.base(rev1)
879 885 b2 = self.base(rev2)
880 886 if b1 == b2 and rev1 + 1 == rev2:
881 887 return self.chunk(rev2)
882 888 else:
883 889 return self.diff(self.revision(self.node(rev1)),
884 890 self.revision(self.node(rev2)))
885 891
886 892 def revision(self, node):
887 893 """return an uncompressed revision of a given"""
888 894 if node == nullid: return ""
889 895 if self.cache and self.cache[0] == node: return self.cache[2]
890 896
891 897 # look up what we need to read
892 898 text = None
893 899 rev = self.rev(node)
894 900 base = self.base(rev)
895 901
896 902 if self.inlinedata():
897 903 # we probably have the whole chunk cached
898 904 df = None
899 905 else:
900 906 df = self.opener(self.datafile)
901 907
902 908 # do we have useful data cached?
903 909 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
904 910 base = self.cache[1]
905 911 text = self.cache[2]
906 912 self.loadindex(base, rev + 1)
907 913 else:
908 914 self.loadindex(base, rev + 1)
909 915 text = self.chunk(base, df=df)
910 916
911 917 bins = []
912 918 for r in xrange(base + 1, rev + 1):
913 919 bins.append(self.chunk(r, df=df))
914 920
915 921 text = self.patches(text, bins)
916 922
917 923 p1, p2 = self.parents(node)
918 924 if node != hash(text, p1, p2):
919 925 raise RevlogError(_("integrity check failed on %s:%d")
920 926 % (self.datafile, rev))
921 927
922 928 self.cache = (node, rev, text)
923 929 return text
924 930
925 931 def checkinlinesize(self, tr, fp=None):
926 932 if not self.inlinedata():
927 933 return
928 934 if not fp:
929 935 fp = self.opener(self.indexfile, 'r')
930 936 fp.seek(0, 2)
931 937 size = fp.tell()
932 938 if size < 131072:
933 939 return
934 940 trinfo = tr.find(self.indexfile)
935 941 if trinfo == None:
936 942 raise RevlogError(_("%s not found in the transaction")
937 943 % self.indexfile)
938 944
939 945 trindex = trinfo[2]
940 946 dataoff = self.start(trindex)
941 947
942 948 tr.add(self.datafile, dataoff)
943 949 df = self.opener(self.datafile, 'w')
944 950 calc = struct.calcsize(self.indexformat)
945 951 for r in xrange(self.count()):
946 952 start = self.start(r) + (r + 1) * calc
947 953 length = self.length(r)
948 954 fp.seek(start)
949 955 d = fp.read(length)
950 956 df.write(d)
951 957 fp.close()
952 958 df.close()
953 959 fp = self.opener(self.indexfile, 'w', atomictemp=True)
954 960 self.version &= ~(REVLOGNGINLINEDATA)
955 961 if self.count():
956 962 x = self.index[0]
957 963 e = struct.pack(self.indexformat, *x)[4:]
958 964 l = struct.pack(versionformat, self.version)
959 965 fp.write(l)
960 966 fp.write(e)
961 967
962 968 for i in xrange(1, self.count()):
963 969 x = self.index[i]
964 970 e = struct.pack(self.indexformat, *x)
965 971 fp.write(e)
966 972
967 973 # if we don't call rename, the temp file will never replace the
968 974 # real index
969 975 fp.rename()
970 976
971 977 tr.replace(self.indexfile, trindex * calc)
972 978 self.chunkcache = None
973 979
974 980 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
975 981 """add a revision to the log
976 982
977 983 text - the revision data to add
978 984 transaction - the transaction object used for rollback
979 985 link - the linkrev data to add
980 986 p1, p2 - the parent nodeids of the revision
981 987 d - an optional precomputed delta
982 988 """
983 989 if not self.inlinedata():
984 990 dfh = self.opener(self.datafile, "a")
985 991 else:
986 992 dfh = None
987 993 ifh = self.opener(self.indexfile, "a+")
988 994 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
989 995
990 996 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
991 997 if text is None: text = ""
992 998 if p1 is None: p1 = self.tip()
993 999 if p2 is None: p2 = nullid
994 1000
995 1001 node = hash(text, p1, p2)
996 1002
997 1003 if node in self.nodemap:
998 1004 return node
999 1005
1000 1006 n = self.count()
1001 1007 t = n - 1
1002 1008
1003 1009 if n:
1004 1010 base = self.base(t)
1005 1011 start = self.start(base)
1006 1012 end = self.end(t)
1007 1013 if not d:
1008 1014 prev = self.revision(self.tip())
1009 1015 d = self.diff(prev, text)
1010 1016 data = compress(d)
1011 1017 l = len(data[1]) + len(data[0])
1012 1018 dist = end - start + l
1013 1019
1014 1020 # full versions are inserted when the needed deltas
1015 1021 # become comparable to the uncompressed text
1016 1022 if not n or dist > len(text) * 2:
1017 1023 data = compress(text)
1018 1024 l = len(data[1]) + len(data[0])
1019 1025 base = n
1020 1026 else:
1021 1027 base = self.base(t)
1022 1028
1023 1029 offset = 0
1024 1030 if t >= 0:
1025 1031 offset = self.end(t)
1026 1032
1027 1033 if self.version == REVLOGV0:
1028 1034 e = (offset, l, base, link, p1, p2, node)
1029 1035 else:
1030 1036 e = (self.offset_type(offset, 0), l, len(text),
1031 1037 base, link, self.rev(p1), self.rev(p2), node)
1032 1038
1033 1039 self.index.append(e)
1034 1040 self.nodemap[node] = n
1035 1041 entry = struct.pack(self.indexformat, *e)
1036 1042
1037 1043 if not self.inlinedata():
1038 1044 transaction.add(self.datafile, offset)
1039 1045 transaction.add(self.indexfile, n * len(entry))
1040 1046 if data[0]:
1041 1047 dfh.write(data[0])
1042 1048 dfh.write(data[1])
1043 1049 dfh.flush()
1044 1050 else:
1045 1051 ifh.seek(0, 2)
1046 1052 transaction.add(self.indexfile, ifh.tell(), self.count() - 1)
1047 1053
1048 1054 if len(self.index) == 1 and self.version != REVLOGV0:
1049 1055 l = struct.pack(versionformat, self.version)
1050 1056 ifh.write(l)
1051 1057 entry = entry[4:]
1052 1058
1053 1059 ifh.write(entry)
1054 1060
1055 1061 if self.inlinedata():
1056 1062 ifh.write(data[0])
1057 1063 ifh.write(data[1])
1058 1064 self.checkinlinesize(transaction, ifh)
1059 1065
1060 1066 self.cache = (node, n, text)
1061 1067 return node
1062 1068
1063 1069 def ancestor(self, a, b):
1064 1070 """calculate the least common ancestor of nodes a and b"""
1065 1071
1066 1072 def parents(rev):
1067 1073 return [p for p in self.parentrevs(rev) if p != nullrev]
1068 1074
1069 1075 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1070 1076 if c is None:
1071 1077 return nullid
1072 1078
1073 1079 return self.node(c)
1074 1080
1075 1081 def group(self, nodelist, lookup, infocollect=None):
1076 1082 """calculate a delta group
1077 1083
1078 1084 Given a list of changeset revs, return a set of deltas and
1079 1085 metadata corresponding to nodes. The first delta is
1080 1086 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1081 1087 have this parent, as it has all history before these
1082 1088 changesets. parent is parent[0].
1083 1089 """
1084 1090 revs = [self.rev(n) for n in nodelist]
1085 1091
1086 1092 # if we don't have any revisions touched by these changesets, bail
1087 1093 if not revs:
1088 1094 yield changegroup.closechunk()
1089 1095 return
1090 1096
1091 1097 # add the parent of the first rev
1092 1098 p = self.parents(self.node(revs[0]))[0]
1093 1099 revs.insert(0, self.rev(p))
1094 1100
1095 1101 # build deltas
1096 1102 for d in xrange(0, len(revs) - 1):
1097 1103 a, b = revs[d], revs[d + 1]
1098 1104 nb = self.node(b)
1099 1105
1100 1106 if infocollect is not None:
1101 1107 infocollect(nb)
1102 1108
1103 1109 d = self.revdiff(a, b)
1104 1110 p = self.parents(nb)
1105 1111 meta = nb + p[0] + p[1] + lookup(nb)
1106 1112 yield changegroup.genchunk("%s%s" % (meta, d))
1107 1113
1108 1114 yield changegroup.closechunk()
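# Chunk layout sketch, matching what addgroup() unpacks below: each
# delta chunk is four 20-byte nodes followed by the delta payload,
#
#   node | p1 | p2 | linknode | delta...
#
# so struct.unpack("20s20s20s20s", chunk[:80]) recovers the header.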
1109 1115
1110 1116 def addgroup(self, revs, linkmapper, transaction, unique=0):
1111 1117 """
1112 1118 add a delta group
1113 1119
1114 1120 given a set of deltas, add them to the revision log. the
1115 1121 first delta is against its parent, which should be in our
1116 1122 log; the rest are against the previous delta.
1117 1123 """
1118 1124
1119 1125 #track the base of the current delta log
1120 1126 r = self.count()
1121 1127 t = r - 1
1122 1128 node = None
1123 1129
1124 1130 base = prev = nullrev
1125 1131 start = end = textlen = 0
1126 1132 if r:
1127 1133 end = self.end(t)
1128 1134
1129 1135 ifh = self.opener(self.indexfile, "a+")
1130 1136 ifh.seek(0, 2)
1131 1137 transaction.add(self.indexfile, ifh.tell(), self.count())
1132 1138 if self.inlinedata():
1133 1139 dfh = None
1134 1140 else:
1135 1141 transaction.add(self.datafile, end)
1136 1142 dfh = self.opener(self.datafile, "a")
1137 1143
1138 1144 # loop through our set of deltas
1139 1145 chain = None
1140 1146 for chunk in revs:
1141 1147 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1142 1148 link = linkmapper(cs)
1143 1149 if node in self.nodemap:
1144 1150 # this can happen if two branches make the same change
1145 1151 # if unique:
1146 1152 # raise RevlogError(_("already have %s") % hex(node[:4]))
1147 1153 chain = node
1148 1154 continue
1149 1155 delta = chunk[80:]
1150 1156
1151 1157 for p in (p1, p2):
1152 1158 if not p in self.nodemap:
1153 1159 raise RevlogError(_("unknown parent %s") % short(p))
1154 1160
1155 1161 if not chain:
1156 1162 # retrieve the parent revision of the delta chain
1157 1163 chain = p1
1158 1164 if not chain in self.nodemap:
1159 1165 raise RevlogError(_("unknown base %s") % short(chain[:4]))
1160 1166
1161 1167 # full versions are inserted when the needed deltas become
1162 1168 # comparable to the uncompressed text or when the previous
1163 1169 # version is not the one we have a delta against. We use
1164 1170 # the size of the previous full rev as a proxy for the
1165 1171 # current size.
1166 1172
1167 1173 if chain == prev:
1168 1174 tempd = compress(delta)
1169 1175 cdelta = tempd[0] + tempd[1]
1170 1176 textlen = mdiff.patchedsize(textlen, delta)
1171 1177
1172 1178 if chain != prev or (end - start + len(cdelta)) > textlen * 2:
1173 1179 # flush our writes here so we can read it in revision
1174 1180 if dfh:
1175 1181 dfh.flush()
1176 1182 ifh.flush()
1177 1183 text = self.revision(chain)
1178 1184 text = self.patches(text, [delta])
1179 1185 chk = self._addrevision(text, transaction, link, p1, p2, None,
1180 1186 ifh, dfh)
1181 1187 if not dfh and not self.inlinedata():
1182 1188 # addrevision switched from inline to conventional
1183 1189 # reopen the index
1184 1190 dfh = self.opener(self.datafile, "a")
1185 1191 ifh = self.opener(self.indexfile, "a")
1186 1192 if chk != node:
1187 1193 raise RevlogError(_("consistency error adding group"))
1188 1194 textlen = len(text)
1189 1195 else:
1190 1196 if self.version == REVLOGV0:
1191 1197 e = (end, len(cdelta), base, link, p1, p2, node)
1192 1198 else:
1193 1199 e = (self.offset_type(end, 0), len(cdelta), textlen, base,
1194 1200 link, self.rev(p1), self.rev(p2), node)
1195 1201 self.index.append(e)
1196 1202 self.nodemap[node] = r
1197 1203 if self.inlinedata():
1198 1204 ifh.write(struct.pack(self.indexformat, *e))
1199 1205 ifh.write(cdelta)
1200 1206 self.checkinlinesize(transaction, ifh)
1201 1207 if not self.inlinedata():
1202 1208 dfh = self.opener(self.datafile, "a")
1203 1209 ifh = self.opener(self.indexfile, "a")
1204 1210 else:
1205 1211 dfh.write(cdelta)
1206 1212 ifh.write(struct.pack(self.indexformat, *e))
1207 1213
1208 1214 t, r, chain, prev = r, r + 1, node, node
1209 1215 base = self.base(t)
1210 1216 start = self.start(base)
1211 1217 end = self.end(t)
1212 1218
1213 1219 return node
1214 1220
1215 1221 def strip(self, rev, minlink):
1216 1222 if self.count() == 0 or rev >= self.count():
1217 1223 return
1218 1224
1219 1225 if isinstance(self.index, lazyindex):
1220 1226 self.loadindexmap()
1221 1227
1222 1228 # When stripping away a revision, we need to make sure it
1223 1229 # does not actually belong to an older changeset.
1224 1230 # The minlink parameter defines the oldest revision
1225 1231 # we're allowed to strip away.
1226 1232 while minlink > self.index[rev][-4]:
1227 1233 rev += 1
1228 1234 if rev >= self.count():
1229 1235 return
1230 1236
1231 1237 # first truncate the files on disk
1232 1238 end = self.start(rev)
1233 1239 if not self.inlinedata():
1234 1240 df = self.opener(self.datafile, "a")
1235 1241 df.truncate(end)
1236 1242 end = rev * struct.calcsize(self.indexformat)
1237 1243 else:
1238 1244 end += rev * struct.calcsize(self.indexformat)
1239 1245
1240 1246 indexf = self.opener(self.indexfile, "a")
1241 1247 indexf.truncate(end)
1242 1248
1243 1249 # then reset internal state in memory to forget those revisions
1244 1250 self.cache = None
1245 1251 self.chunkcache = None
1246 1252 for x in xrange(rev, self.count()):
1247 1253 del self.nodemap[self.node(x)]
1248 1254
1249 1255 del self.index[rev:]
1250 1256
1251 1257 def checksize(self):
1252 1258 expected = 0
1253 1259 if self.count():
1254 1260 expected = self.end(self.count() - 1)
1255 1261
1256 1262 try:
1257 1263 f = self.opener(self.datafile)
1258 1264 f.seek(0, 2)
1259 1265 actual = f.tell()
1260 1266 dd = actual - expected
1261 1267 except IOError, inst:
1262 1268 if inst.errno != errno.ENOENT:
1263 1269 raise
1264 1270 dd = 0
1265 1271
1266 1272 try:
1267 1273 f = self.opener(self.indexfile)
1268 1274 f.seek(0, 2)
1269 1275 actual = f.tell()
1270 1276 s = struct.calcsize(self.indexformat)
1271 1277 i = actual / s
1272 1278 di = actual - (i * s)
1273 1279 if self.inlinedata():
1274 1280 databytes = 0
1275 1281 for r in xrange(self.count()):
1276 1282 databytes += self.length(r)
1277 1283 dd = 0
1278 1284 di = actual - self.count() * s - databytes
1279 1285 except IOError, inst:
1280 1286 if inst.errno != errno.ENOENT:
1281 1287 raise
1282 1288 di = 0
1283 1289
1284 1290 return (dd, di)
1285 1291
1286 1292
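For reference, the size accounting in checksize() above reduces to the following minimal sketch (check_sizes, recsize and chunk_lengths are illustrative names, not Mercurial's API): dd is how far the data file extends past the end of the last indexed chunk, and di is how many stray bytes trail the last whole index record.

def check_sizes(index_bytes, data_bytes, recsize, chunk_lengths, inline):
    # inline revlogs interleave each chunk after its index record, so
    # the whole file must be records plus data, and dd does not apply
    if inline:
        expected = len(chunk_lengths) * recsize + sum(chunk_lengths)
        return 0, index_bytes - expected
    dd = data_bytes - sum(chunk_lengths)                    # bytes past last chunk
    di = index_bytes - (index_bytes // recsize) * recsize   # partial trailing record
    return dd, di

A non-zero dd or di therefore means the data file extends past the last indexed chunk, or the index ends in a truncated record, respectively.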
@@ -1,57 +1,62 @@
1 1 #!/bin/sh
2 2
3 3 mkdir a
4 4 cd a
5 5 hg init
6 6 echo foo > t1
7 7 hg add t1
8 8 hg commit -m "1" -d "1000000 0"
9 9
10 10 cd ..
11 11 hg clone a b
12 12
13 13 cd a
14 14 echo foo > t2
15 15 hg add t2
16 16 hg commit -m "2" -d "1000000 0"
17 17
18 18 cd ../b
19 19 echo foo > t3
20 20 hg add t3
21 21 hg commit -m "3" -d "1000000 0"
22 22
23 23 hg push ../a
24 24 hg pull ../a
25 25 hg push ../a
26 26 hg merge
27 27 hg commit -m "4" -d "1000000 0"
28 28 hg push ../a
29 29 cd ..
30 30
31 31 hg init c
32 32 cd c
33 33 for i in 0 1 2; do
34 34 echo $i >> foo
35 35 hg ci -Am $i -d "1000000 0"
36 36 done
37 37 cd ..
38 38
39 39 hg clone c d
40 40 cd d
41 41 for i in 0 1; do
42 42 hg co -C $i
43 43 echo d-$i >> foo
44 44 hg ci -m d-$i -d "1000000 0"
45 45 done
46 46
47 47 HGMERGE=true hg merge 3
48 48 hg ci -m c-d -d "1000000 0"
49 49
50 50 hg push ../c; echo $?
51 51 hg push -r 2 ../c; echo $?
52 52 hg push -r 3 ../c; echo $?
53 53 hg push -r 3 -r 4 ../c; echo $?
54 54 hg push -f -r 3 -r 4 ../c; echo $?
55 55 hg push -r 5 ../c; echo $?
56 56
57 # issue 450
58 hg init ../e
59 hg push -r 0 ../e ; echo $?
60 hg push -r 1 ../e ; echo $?
61
57 62 exit 0
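The appended hunk is the regression test for issue 450: pushing a subset of revisions with -r into a freshly created, empty repository. Before this fix, the new-heads calculation for a -r push could be made against the wrong baseline and abort with "push creates new remote branches!" even though the remote had no heads to begin with. A rough sketch of the intended check (push_rejected is a hypothetical helper, not the actual prepush code):

NULLID = "\0" * 20  # Mercurial's null node, the only "head" an empty repo reports

def push_rejected(remote_heads, heads_after_push, force=False):
    # Reject only an unforced push that would leave the remote with
    # more heads than it already has; an empty remote gains its first
    # head legitimately, so it must never trigger the abort.
    if force or remote_heads == [NULLID]:
        return False
    return len(heads_after_push) > len(remote_heads)

Under this reading, both "hg push -r 0 ../e" and "hg push -r 1 ../e" succeed with exit status 0, which is exactly what the expected output below records.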
@@ -1,64 +1,78 @@
1 1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 2 pushing to ../a
3 3 searching for changes
4 4 abort: push creates new remote branches!
5 5 (did you forget to merge? use push -f to force)
6 6 pulling from ../a
7 7 searching for changes
8 8 adding changesets
9 9 adding manifests
10 10 adding file changes
11 11 added 1 changesets with 1 changes to 1 files (+1 heads)
12 12 (run 'hg heads' to see heads, 'hg merge' to merge)
13 13 pushing to ../a
14 14 searching for changes
15 15 abort: push creates new remote branches!
16 16 (did you forget to merge? use push -f to force)
17 17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 18 (branch merge, don't forget to commit)
19 19 pushing to ../a
20 20 searching for changes
21 21 adding changesets
22 22 adding manifests
23 23 adding file changes
24 24 added 2 changesets with 1 changes to 1 files
25 25 adding foo
26 26 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
27 27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 29 merging foo
30 30 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
31 31 (branch merge, don't forget to commit)
32 32 pushing to ../c
33 33 searching for changes
34 34 abort: push creates new remote branches!
35 35 (did you forget to merge? use push -f to force)
36 36 0
37 37 pushing to ../c
38 38 searching for changes
39 39 no changes found
40 40 0
41 41 pushing to ../c
42 42 searching for changes
43 43 abort: push creates new remote branches!
44 44 (did you forget to merge? use push -f to force)
45 45 0
46 46 pushing to ../c
47 47 searching for changes
48 48 abort: push creates new remote branches!
49 49 (did you forget to merge? use push -f to force)
50 50 0
51 51 pushing to ../c
52 52 searching for changes
53 53 adding changesets
54 54 adding manifests
55 55 adding file changes
56 56 added 2 changesets with 2 changes to 1 files (+2 heads)
57 57 0
58 58 pushing to ../c
59 59 searching for changes
60 60 adding changesets
61 61 adding manifests
62 62 adding file changes
63 63 added 1 changesets with 1 changes to 1 files (-1 heads)
64 64 0
65 pushing to ../e
66 searching for changes
67 adding changesets
68 adding manifests
69 adding file changes
70 added 1 changesets with 1 changes to 1 files
71 0
72 pushing to ../e
73 searching for changes
74 adding changesets
75 adding manifests
76 adding file changes
77 added 1 changesets with 1 changes to 1 files
78 0
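The "(+2 heads)" and "(-1 heads)" suffixes in the output above report the change in the repository's head count across the applied changegroup, and are omitted when the count is unchanged. As a sketch of that arithmetic (heads_suffix is an illustrative helper, not the actual reporting code):

def heads_suffix(heads_before, heads_after):
    # emit " (+N heads)" or " (-N heads)" only when the count changed
    delta = heads_after - heads_before
    return "" if delta == 0 else " (%+d heads)" % delta

assert heads_suffix(1, 3) == " (+2 heads)"  # forced push adding two named heads
assert heads_suffix(2, 1) == " (-1 heads)"  # pushing a merge of two heads
assert heads_suffix(1, 1) == ""             # ordinary fast-forward push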