store: change handling of decoding errors
Matt Mackall
r6900:def492d1 default
@@ -1,2088 +1,2088 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store')
20 20
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.root = os.path.realpath(path)
24 24 self.path = os.path.join(self.root, ".hg")
25 25 self.origroot = path
26 26 self.opener = util.opener(self.path)
27 27 self.wopener = util.opener(self.root)
28 28
29 29 if not os.path.isdir(self.path):
30 30 if create:
31 31 if not os.path.exists(path):
32 32 os.mkdir(path)
33 33 os.mkdir(self.path)
34 34 requirements = ["revlogv1"]
35 35 if parentui.configbool('format', 'usestore', True):
36 36 os.mkdir(os.path.join(self.path, "store"))
37 37 requirements.append("store")
38 38 # create an invalid changelog
39 39 self.opener("00changelog.i", "a").write(
40 40 '\0\0\0\2' # represents revlogv2
41 41 ' dummy changelog to prevent using the old repo layout'
42 42 )
43 43 reqfile = self.opener("requires", "w")
44 44 for r in requirements:
45 45 reqfile.write("%s\n" % r)
46 46 reqfile.close()
47 47 else:
48 48 raise repo.RepoError(_("repository %s not found") % path)
49 49 elif create:
50 50 raise repo.RepoError(_("repository %s already exists") % path)
51 51 else:
52 52 # find requirements
53 53 requirements = []
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 for r in requirements:
57 57 if r not in self.supported:
58 58 raise repo.RepoError(_("requirement '%s' not supported") % r)
59 59 except IOError, inst:
60 60 if inst.errno != errno.ENOENT:
61 61 raise
62 62
63 63 self.store = store.store(requirements, self.path, util.opener)
64 64 self.spath = self.store.path
65 65 self.sopener = self.store.opener
66 66 self.sjoin = self.store.join
67 67 self.opener.createmode = self.store.createmode
68 68
69 69 self.ui = ui.ui(parentui=parentui)
70 70 try:
71 71 self.ui.readconfig(self.join("hgrc"), self.root)
72 72 extensions.loadall(self.ui)
73 73 except IOError:
74 74 pass
75 75
76 76 self.tagscache = None
77 77 self._tagstypecache = None
78 78 self.branchcache = None
79 79 self._ubranchcache = None # UTF-8 version of branchcache
80 80 self._branchcachetip = None
81 81 self.nodetagscache = None
82 82 self.filterpats = {}
83 83 self._datafilters = {}
84 84 self._transref = self._lockref = self._wlockref = None
85 85
86 86 def __getattr__(self, name):
87 87 if name == 'changelog':
88 88 self.changelog = changelog.changelog(self.sopener)
89 89 self.sopener.defversion = self.changelog.version
90 90 return self.changelog
91 91 if name == 'manifest':
 92 92 self.changelog # load changelog first so defversion is set
93 93 self.manifest = manifest.manifest(self.sopener)
94 94 return self.manifest
95 95 if name == 'dirstate':
96 96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
97 97 return self.dirstate
98 98 else:
99 99 raise AttributeError, name
100 100
101 101 def __getitem__(self, changeid):
102 102 if changeid == None:
103 103 return context.workingctx(self)
104 104 return context.changectx(self, changeid)
105 105
106 106 def __nonzero__(self):
107 107 return True
108 108
109 109 def __len__(self):
110 110 return len(self.changelog)
111 111
112 112 def __iter__(self):
113 113 for i in xrange(len(self)):
114 114 yield i
115 115
116 116 def url(self):
117 117 return 'file:' + self.root
118 118
119 119 def hook(self, name, throw=False, **args):
120 120 return hook.hook(self.ui, self, name, throw, **args)
121 121
122 122 tag_disallowed = ':\r\n'
123 123
124 124 def _tag(self, names, node, message, local, user, date, parent=None,
125 125 extra={}):
126 126 use_dirstate = parent is None
127 127
128 128 if isinstance(names, str):
129 129 allchars = names
130 130 names = (names,)
131 131 else:
132 132 allchars = ''.join(names)
133 133 for c in self.tag_disallowed:
134 134 if c in allchars:
135 135 raise util.Abort(_('%r cannot be used in a tag name') % c)
136 136
137 137 for name in names:
138 138 self.hook('pretag', throw=True, node=hex(node), tag=name,
139 139 local=local)
140 140
141 141 def writetags(fp, names, munge, prevtags):
142 142 fp.seek(0, 2)
143 143 if prevtags and prevtags[-1] != '\n':
144 144 fp.write('\n')
145 145 for name in names:
146 146 m = munge and munge(name) or name
147 147 if self._tagstypecache and name in self._tagstypecache:
148 148 old = self.tagscache.get(name, nullid)
149 149 fp.write('%s %s\n' % (hex(old), m))
150 150 fp.write('%s %s\n' % (hex(node), m))
151 151 fp.close()
152 152
153 153 prevtags = ''
154 154 if local:
155 155 try:
156 156 fp = self.opener('localtags', 'r+')
157 157 except IOError, err:
158 158 fp = self.opener('localtags', 'a')
159 159 else:
160 160 prevtags = fp.read()
161 161
162 162 # local tags are stored in the current charset
163 163 writetags(fp, names, None, prevtags)
164 164 for name in names:
165 165 self.hook('tag', node=hex(node), tag=name, local=local)
166 166 return
167 167
168 168 if use_dirstate:
169 169 try:
170 170 fp = self.wfile('.hgtags', 'rb+')
171 171 except IOError, err:
172 172 fp = self.wfile('.hgtags', 'ab')
173 173 else:
174 174 prevtags = fp.read()
175 175 else:
176 176 try:
177 177 prevtags = self.filectx('.hgtags', parent).data()
178 178 except revlog.LookupError:
179 179 pass
180 180 fp = self.wfile('.hgtags', 'wb')
181 181 if prevtags:
182 182 fp.write(prevtags)
183 183
184 184 # committed tags are stored in UTF-8
185 185 writetags(fp, names, util.fromlocal, prevtags)
186 186
187 187 if use_dirstate and '.hgtags' not in self.dirstate:
188 188 self.add(['.hgtags'])
189 189
190 190 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
191 191 extra=extra)
192 192
193 193 for name in names:
194 194 self.hook('tag', node=hex(node), tag=name, local=local)
195 195
196 196 return tagnode
197 197
198 198 def tag(self, names, node, message, local, user, date):
199 199 '''tag a revision with one or more symbolic names.
200 200
201 201 names is a list of strings or, when adding a single tag, names may be a
202 202 string.
203 203
204 204 if local is True, the tags are stored in a per-repository file.
205 205 otherwise, they are stored in the .hgtags file, and a new
206 206 changeset is committed with the change.
207 207
208 208 keyword arguments:
209 209
210 210 local: whether to store tags in non-version-controlled file
211 211 (default False)
212 212
213 213 message: commit message to use if committing
214 214
215 215 user: name of user to use if committing
216 216
217 217 date: date tuple to use if committing'''
218 218
219 219 for x in self.status()[:5]:
220 220 if '.hgtags' in x:
221 221 raise util.Abort(_('working copy of .hgtags is changed '
222 222 '(please commit .hgtags manually)'))
223 223
224 224 self._tag(names, node, message, local, user, date)
225 225
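Editor's note: a hedged usage sketch, not part of this changeset. Assuming a repository opened through the public helper (the path is hypothetical), a local tag takes the early-return branch in _tag above and never commits:

    from mercurial import ui as uimod, hg
    repo = hg.repository(uimod.ui(), '/path/to/repo')  # hypothetical path
    node = repo.lookup('tip')
    # local=True writes .hg/localtags in the local charset; local=False
    # would write .hgtags and commit it with the given message.
    repo.tag('snapshot', node, 'mark a snapshot', True, None, None)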
226 226 def tags(self):
227 227 '''return a mapping of tag to node'''
228 228 if self.tagscache:
229 229 return self.tagscache
230 230
231 231 globaltags = {}
232 232 tagtypes = {}
233 233
234 234 def readtags(lines, fn, tagtype):
235 235 filetags = {}
236 236 count = 0
237 237
238 238 def warn(msg):
239 239 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
240 240
241 241 for l in lines:
242 242 count += 1
243 243 if not l:
244 244 continue
245 245 s = l.split(" ", 1)
246 246 if len(s) != 2:
247 247 warn(_("cannot parse entry"))
248 248 continue
249 249 node, key = s
250 250 key = util.tolocal(key.strip()) # stored in UTF-8
251 251 try:
252 252 bin_n = bin(node)
253 253 except TypeError:
254 254 warn(_("node '%s' is not well formed") % node)
255 255 continue
256 256 if bin_n not in self.changelog.nodemap:
257 257 warn(_("tag '%s' refers to unknown node") % key)
258 258 continue
259 259
260 260 h = []
261 261 if key in filetags:
262 262 n, h = filetags[key]
263 263 h.append(n)
264 264 filetags[key] = (bin_n, h)
265 265
266 266 for k, nh in filetags.items():
267 267 if k not in globaltags:
268 268 globaltags[k] = nh
269 269 tagtypes[k] = tagtype
270 270 continue
271 271
272 272 # we prefer the global tag if:
 273 273 # it supersedes us OR
 274 274 # we mutually supersede and it has a higher rank
275 275 # otherwise we win because we're tip-most
276 276 an, ah = nh
277 277 bn, bh = globaltags[k]
278 278 if (bn != an and an in bh and
279 279 (bn not in ah or len(bh) > len(ah))):
280 280 an = bn
281 281 ah.extend([n for n in bh if n not in ah])
282 282 globaltags[k] = an, ah
283 283 tagtypes[k] = tagtype
284 284
285 285 # read the tags file from each head, ending with the tip
286 286 f = None
287 287 for rev, node, fnode in self._hgtagsnodes():
288 288 f = (f and f.filectx(fnode) or
289 289 self.filectx('.hgtags', fileid=fnode))
290 290 readtags(f.data().splitlines(), f, "global")
291 291
292 292 try:
293 293 data = util.fromlocal(self.opener("localtags").read())
294 294 # localtags are stored in the local character set
295 295 # while the internal tag table is stored in UTF-8
296 296 readtags(data.splitlines(), "localtags", "local")
297 297 except IOError:
298 298 pass
299 299
300 300 self.tagscache = {}
301 301 self._tagstypecache = {}
302 302 for k,nh in globaltags.items():
303 303 n = nh[0]
304 304 if n != nullid:
305 305 self.tagscache[k] = n
306 306 self._tagstypecache[k] = tagtypes[k]
307 307 self.tagscache['tip'] = self.changelog.tip()
308 308 return self.tagscache
309 309
310 310 def tagtype(self, tagname):
311 311 '''
312 312 return the type of the given tag. result can be:
313 313
314 314 'local' : a local tag
315 315 'global' : a global tag
316 316 None : tag does not exist
317 317 '''
318 318
319 319 self.tags()
320 320
321 321 return self._tagstypecache.get(tagname)
322 322
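A sketch (editor's addition, reusing the `repo` object from the earlier tag example): tag names come back in the local charset, and tagtype() distinguishes the two storage locations:

    from mercurial.node import hex
    for t, n in repo.tags().items():
        print t, hex(n), repo.tagtype(t)  # 'local', 'global', or None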
323 323 def _hgtagsnodes(self):
324 324 heads = self.heads()
325 325 heads.reverse()
326 326 last = {}
327 327 ret = []
328 328 for node in heads:
329 329 c = self[node]
330 330 rev = c.rev()
331 331 try:
332 332 fnode = c.filenode('.hgtags')
333 333 except revlog.LookupError:
334 334 continue
335 335 ret.append((rev, node, fnode))
336 336 if fnode in last:
337 337 ret[last[fnode]] = None
338 338 last[fnode] = len(ret) - 1
339 339 return [item for item in ret if item]
340 340
341 341 def tagslist(self):
342 342 '''return a list of tags ordered by revision'''
343 343 l = []
344 344 for t, n in self.tags().items():
345 345 try:
346 346 r = self.changelog.rev(n)
347 347 except:
348 348 r = -2 # sort to the beginning of the list if unknown
349 349 l.append((r, t, n))
350 350 return [(t, n) for r, t, n in util.sort(l)]
351 351
352 352 def nodetags(self, node):
353 353 '''return the tags associated with a node'''
354 354 if not self.nodetagscache:
355 355 self.nodetagscache = {}
356 356 for t, n in self.tags().items():
357 357 self.nodetagscache.setdefault(n, []).append(t)
358 358 return self.nodetagscache.get(node, [])
359 359
360 360 def _branchtags(self, partial, lrev):
361 361 tiprev = len(self) - 1
362 362 if lrev != tiprev:
363 363 self._updatebranchcache(partial, lrev+1, tiprev+1)
364 364 self._writebranchcache(partial, self.changelog.tip(), tiprev)
365 365
366 366 return partial
367 367
368 368 def branchtags(self):
369 369 tip = self.changelog.tip()
370 370 if self.branchcache is not None and self._branchcachetip == tip:
371 371 return self.branchcache
372 372
373 373 oldtip = self._branchcachetip
374 374 self._branchcachetip = tip
375 375 if self.branchcache is None:
376 376 self.branchcache = {} # avoid recursion in changectx
377 377 else:
378 378 self.branchcache.clear() # keep using the same dict
379 379 if oldtip is None or oldtip not in self.changelog.nodemap:
380 380 partial, last, lrev = self._readbranchcache()
381 381 else:
382 382 lrev = self.changelog.rev(oldtip)
383 383 partial = self._ubranchcache
384 384
385 385 self._branchtags(partial, lrev)
386 386
387 387 # the branch cache is stored on disk as UTF-8, but in the local
388 388 # charset internally
389 389 for k, v in partial.items():
390 390 self.branchcache[util.tolocal(k)] = v
391 391 self._ubranchcache = partial
392 392 return self.branchcache
393 393
394 394 def _readbranchcache(self):
395 395 partial = {}
396 396 try:
397 397 f = self.opener("branch.cache")
398 398 lines = f.read().split('\n')
399 399 f.close()
400 400 except (IOError, OSError):
401 401 return {}, nullid, nullrev
402 402
403 403 try:
404 404 last, lrev = lines.pop(0).split(" ", 1)
405 405 last, lrev = bin(last), int(lrev)
406 406 if lrev >= len(self) or self[lrev].node() != last:
407 407 # invalidate the cache
408 408 raise ValueError('invalidating branch cache (tip differs)')
409 409 for l in lines:
410 410 if not l: continue
411 411 node, label = l.split(" ", 1)
412 412 partial[label.strip()] = bin(node)
413 413 except (KeyboardInterrupt, util.SignalInterrupt):
414 414 raise
415 415 except Exception, inst:
416 416 if self.ui.debugflag:
417 417 self.ui.warn(str(inst), '\n')
418 418 partial, last, lrev = {}, nullid, nullrev
419 419 return partial, last, lrev
420 420
421 421 def _writebranchcache(self, branches, tip, tiprev):
422 422 try:
423 423 f = self.opener("branch.cache", "w", atomictemp=True)
424 424 f.write("%s %s\n" % (hex(tip), tiprev))
425 425 for label, node in branches.iteritems():
426 426 f.write("%s %s\n" % (hex(node), label))
427 427 f.rename()
428 428 except (IOError, OSError):
429 429 pass
430 430
431 431 def _updatebranchcache(self, partial, start, end):
432 432 for r in xrange(start, end):
433 433 c = self[r]
434 434 b = c.branch()
435 435 partial[b] = c.node()
436 436
437 437 def lookup(self, key):
438 438 if key == '.':
439 439 return self.dirstate.parents()[0]
440 440 elif key == 'null':
441 441 return nullid
442 442 n = self.changelog._match(key)
443 443 if n:
444 444 return n
445 445 if key in self.tags():
446 446 return self.tags()[key]
447 447 if key in self.branchtags():
448 448 return self.branchtags()[key]
449 449 n = self.changelog._partialmatch(key)
450 450 if n:
451 451 return n
452 452 try:
453 453 if len(key) == 20:
454 454 key = hex(key)
455 455 except:
456 456 pass
457 457 raise repo.RepoError(_("unknown revision '%s'") % key)
458 458
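lookup() tries, in order: '.', 'null', an exact match via changelog._match, the tag table, branch tips, and finally an unambiguous hex prefix. A sketch with hypothetical keys:

    repo.lookup('.')        # first dirstate parent
    repo.lookup('default')  # tip of a named branch, via branchtags()
    repo.lookup('1e62')     # unambiguous node prefix, via _partialmatch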
459 459 def local(self):
460 460 return True
461 461
462 462 def join(self, f):
463 463 return os.path.join(self.path, f)
464 464
465 465 def wjoin(self, f):
466 466 return os.path.join(self.root, f)
467 467
468 468 def rjoin(self, f):
469 469 return os.path.join(self.root, util.pconvert(f))
470 470
471 471 def file(self, f):
472 472 if f[0] == '/':
473 473 f = f[1:]
474 474 return filelog.filelog(self.sopener, f)
475 475
476 476 def changectx(self, changeid):
477 477 return self[changeid]
478 478
479 479 def parents(self, changeid=None):
480 480 '''get list of changectxs for parents of changeid'''
481 481 return self[changeid].parents()
482 482
483 483 def filectx(self, path, changeid=None, fileid=None):
484 484 """changeid can be a changeset revision, node, or tag.
485 485 fileid can be a file revision or node."""
486 486 return context.filectx(self, path, changeid, fileid)
487 487
488 488 def getcwd(self):
489 489 return self.dirstate.getcwd()
490 490
491 491 def pathto(self, f, cwd=None):
492 492 return self.dirstate.pathto(f, cwd)
493 493
494 494 def wfile(self, f, mode='r'):
495 495 return self.wopener(f, mode)
496 496
497 497 def _link(self, f):
498 498 return os.path.islink(self.wjoin(f))
499 499
500 500 def _filter(self, filter, filename, data):
501 501 if filter not in self.filterpats:
502 502 l = []
503 503 for pat, cmd in self.ui.configitems(filter):
504 504 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 505 fn = None
506 506 params = cmd
507 507 for name, filterfn in self._datafilters.iteritems():
508 508 if cmd.startswith(name):
509 509 fn = filterfn
510 510 params = cmd[len(name):].lstrip()
511 511 break
512 512 if not fn:
513 513 fn = lambda s, c, **kwargs: util.filter(s, c)
514 514 # Wrap old filters not supporting keyword arguments
515 515 if not inspect.getargspec(fn)[2]:
516 516 oldfn = fn
517 517 fn = lambda s, c, **kwargs: oldfn(s, c)
518 518 l.append((mf, fn, params))
519 519 self.filterpats[filter] = l
520 520
521 521 for mf, fn, cmd in self.filterpats[filter]:
522 522 if mf(filename):
523 523 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 524 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
525 525 break
526 526
527 527 return data
528 528
529 529 def adddatafilter(self, name, filter):
530 530 self._datafilters[name] = filter
531 531
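The hgrc [encode] and [decode] sections map file patterns to filter commands; adddatafilter() registers an in-process filter that runs whenever a configured command string starts with the registered name. A minimal sketch; the 'upper:' filter name and the pattern are hypothetical:

    def upperfilter(s, cmd, **kwargs):
        return s.upper()
    repo.adddatafilter('upper:', upperfilter)
    # with this in hgrc:  [encode]  **.txt = upper:
    # wread() now routes *.txt data through upperfilter via _filter()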
532 532 def wread(self, filename):
533 533 if self._link(filename):
534 534 data = os.readlink(self.wjoin(filename))
535 535 else:
536 536 data = self.wopener(filename, 'r').read()
537 537 return self._filter("encode", filename, data)
538 538
539 539 def wwrite(self, filename, data, flags):
540 540 data = self._filter("decode", filename, data)
541 541 try:
542 542 os.unlink(self.wjoin(filename))
543 543 except OSError:
544 544 pass
545 545 if 'l' in flags:
546 546 self.wopener.symlink(data, filename)
547 547 else:
548 548 self.wopener(filename, 'w').write(data)
549 549 if 'x' in flags:
550 550 util.set_flags(self.wjoin(filename), False, True)
551 551
552 552 def wwritedata(self, filename, data):
553 553 return self._filter("decode", filename, data)
554 554
555 555 def transaction(self):
556 556 if self._transref and self._transref():
557 557 return self._transref().nest()
558 558
559 559 # abort here if the journal already exists
560 560 if os.path.exists(self.sjoin("journal")):
561 561 raise repo.RepoError(_("journal already exists - run hg recover"))
562 562
563 563 # save dirstate for rollback
564 564 try:
565 565 ds = self.opener("dirstate").read()
566 566 except IOError:
567 567 ds = ""
568 568 self.opener("journal.dirstate", "w").write(ds)
569 569 self.opener("journal.branch", "w").write(self.dirstate.branch())
570 570
571 571 renames = [(self.sjoin("journal"), self.sjoin("undo")),
572 572 (self.join("journal.dirstate"), self.join("undo.dirstate")),
573 573 (self.join("journal.branch"), self.join("undo.branch"))]
574 574 tr = transaction.transaction(self.ui.warn, self.sopener,
575 575 self.sjoin("journal"),
576 576 aftertrans(renames),
577 577 self.store.createmode)
578 578 self._transref = weakref.ref(tr)
579 579 return tr
580 580
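A sketch of the intended calling pattern (editor's addition): the journal files written above become the undo files only when the transaction is closed, while dropping an unclosed transaction aborts it:

    tr = repo.transaction()   # writes journal.dirstate / journal.branch
    try:
        # ... append revlog data under the transaction ...
        tr.close()            # aftertrans() renames journal.* to undo.*
    finally:
        del tr                # an unclosed journal is rolled back here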
581 581 def recover(self):
582 582 l = self.lock()
583 583 try:
584 584 if os.path.exists(self.sjoin("journal")):
585 585 self.ui.status(_("rolling back interrupted transaction\n"))
586 586 transaction.rollback(self.sopener, self.sjoin("journal"))
587 587 self.invalidate()
588 588 return True
589 589 else:
590 590 self.ui.warn(_("no interrupted transaction available\n"))
591 591 return False
592 592 finally:
593 593 del l
594 594
595 595 def rollback(self):
596 596 wlock = lock = None
597 597 try:
598 598 wlock = self.wlock()
599 599 lock = self.lock()
600 600 if os.path.exists(self.sjoin("undo")):
601 601 self.ui.status(_("rolling back last transaction\n"))
602 602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 604 try:
605 605 branch = self.opener("undo.branch").read()
606 606 self.dirstate.setbranch(branch)
607 607 except IOError:
608 608 self.ui.warn(_("Named branch could not be reset, "
609 609 "current branch still is: %s\n")
610 610 % util.tolocal(self.dirstate.branch()))
611 611 self.invalidate()
612 612 self.dirstate.invalidate()
613 613 else:
614 614 self.ui.warn(_("no rollback information available\n"))
615 615 finally:
616 616 del lock, wlock
617 617
618 618 def invalidate(self):
619 619 for a in "changelog manifest".split():
620 620 if a in self.__dict__:
621 621 delattr(self, a)
622 622 self.tagscache = None
623 623 self._tagstypecache = None
624 624 self.nodetagscache = None
625 625 self.branchcache = None
626 626 self._ubranchcache = None
627 627 self._branchcachetip = None
628 628
629 629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
630 630 try:
631 631 l = lock.lock(lockname, 0, releasefn, desc=desc)
632 632 except lock.LockHeld, inst:
633 633 if not wait:
634 634 raise
635 635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
636 636 (desc, inst.locker))
637 637 # default to 600 seconds timeout
638 638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
639 639 releasefn, desc=desc)
640 640 if acquirefn:
641 641 acquirefn()
642 642 return l
643 643
644 644 def lock(self, wait=True):
645 645 if self._lockref and self._lockref():
646 646 return self._lockref()
647 647
648 648 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
649 649 _('repository %s') % self.origroot)
650 650 self._lockref = weakref.ref(l)
651 651 return l
652 652
653 653 def wlock(self, wait=True):
654 654 if self._wlockref and self._wlockref():
655 655 return self._wlockref()
656 656
657 657 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
658 658 self.dirstate.invalidate, _('working directory of %s') %
659 659 self.origroot)
660 660 self._wlockref = weakref.ref(l)
661 661 return l
662 662
663 663 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
664 664 """
665 665 commit an individual file as part of a larger transaction
666 666 """
667 667
668 668 fn = fctx.path()
669 669 t = fctx.data()
670 670 fl = self.file(fn)
671 671 fp1 = manifest1.get(fn, nullid)
672 672 fp2 = manifest2.get(fn, nullid)
673 673
674 674 meta = {}
675 675 cp = fctx.renamed()
676 676 if cp and cp[0] != fn:
677 677 # Mark the new revision of this file as a copy of another
678 678 # file. This copy data will effectively act as a parent
679 679 # of this new revision. If this is a merge, the first
680 680 # parent will be the nullid (meaning "look up the copy data")
681 681 # and the second one will be the other parent. For example:
682 682 #
683 683 # 0 --- 1 --- 3 rev1 changes file foo
684 684 # \ / rev2 renames foo to bar and changes it
685 685 # \- 2 -/ rev3 should have bar with all changes and
686 686 # should record that bar descends from
687 687 # bar in rev2 and foo in rev1
688 688 #
689 689 # this allows this merge to succeed:
690 690 #
691 691 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
692 692 # \ / merging rev3 and rev4 should use bar@rev2
693 693 # \- 2 --- 4 as the merge base
694 694 #
695 695
696 696 cf = cp[0]
697 697 cr = manifest1.get(cf)
698 698 nfp = fp2
699 699
700 700 if manifest2: # branch merge
701 701 if fp2 == nullid: # copied on remote side
702 702 if fp1 != nullid or cf in manifest2:
703 703 cr = manifest2[cf]
704 704 nfp = fp1
705 705
706 706 # find source in nearest ancestor if we've lost track
707 707 if not cr:
708 708 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
709 709 (fn, cf))
710 710 for a in self['.'].ancestors():
711 711 if cf in a:
712 712 cr = a[cf].filenode()
713 713 break
714 714
715 715 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
716 716 meta["copy"] = cf
717 717 meta["copyrev"] = hex(cr)
718 718 fp1, fp2 = nullid, nfp
719 719 elif fp2 != nullid:
720 720 # is one parent an ancestor of the other?
721 721 fpa = fl.ancestor(fp1, fp2)
722 722 if fpa == fp1:
723 723 fp1, fp2 = fp2, nullid
724 724 elif fpa == fp2:
725 725 fp2 = nullid
726 726
727 727 # is the file unmodified from the parent? report existing entry
728 728 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
729 729 return fp1
730 730
731 731 changelist.append(fn)
732 732 return fl.add(t, meta, tr, linkrev, fp1, fp2)
733 733
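Concretely, for the rename pictured above (a sketch using this method's own variables): rev2's filelog entry for bar records foo as its copy source instead of a real first parent:

    meta = {'copy': cf,            # e.g. 'foo', from fctx.renamed()
            'copyrev': hex(cr)}    # filenode of foo found in manifest1
    fp1, fp2 = nullid, nfp         # nullid means 'look up the copy data'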
734 734 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
735 735 if p1 is None:
736 736 p1, p2 = self.dirstate.parents()
737 737 return self.commit(files=files, text=text, user=user, date=date,
738 738 p1=p1, p2=p2, extra=extra, empty_ok=True)
739 739
740 740 def commit(self, files=None, text="", user=None, date=None,
741 741 match=None, force=False, force_editor=False,
742 742 p1=None, p2=None, extra={}, empty_ok=False):
743 743 wlock = lock = None
744 744 if files:
745 745 files = util.unique(files)
746 746 try:
747 747 wlock = self.wlock()
748 748 lock = self.lock()
749 749 use_dirstate = (p1 is None) # not rawcommit
750 750
751 751 if use_dirstate:
752 752 p1, p2 = self.dirstate.parents()
753 753 update_dirstate = True
754 754
755 755 if (not force and p2 != nullid and
756 756 (match and (match.files() or match.anypats()))):
757 757 raise util.Abort(_('cannot partially commit a merge '
758 758 '(do not specify files or patterns)'))
759 759
760 760 if files:
761 761 modified, removed = [], []
762 762 for f in files:
763 763 s = self.dirstate[f]
764 764 if s in 'nma':
765 765 modified.append(f)
766 766 elif s == 'r':
767 767 removed.append(f)
768 768 else:
769 769 self.ui.warn(_("%s not tracked!\n") % f)
770 770 changes = [modified, [], removed, [], []]
771 771 else:
772 772 changes = self.status(match=match)
773 773 else:
774 774 p1, p2 = p1, p2 or nullid
775 775 update_dirstate = (self.dirstate.parents()[0] == p1)
776 776 changes = [files, [], [], [], []]
777 777
778 778 ms = merge_.mergestate(self)
779 779 for f in changes[0]:
780 780 if f in ms and ms[f] == 'u':
781 781 raise util.Abort(_("unresolved merge conflicts "
782 782 "(see hg resolve)"))
783 783 wctx = context.workingctx(self, (p1, p2), text, user, date,
784 784 extra, changes)
785 785 return self._commitctx(wctx, force, force_editor, empty_ok,
786 786 use_dirstate, update_dirstate)
787 787 finally:
788 788 del lock, wlock
789 789
790 790 def commitctx(self, ctx):
791 791 wlock = lock = None
792 792 try:
793 793 wlock = self.wlock()
794 794 lock = self.lock()
795 795 return self._commitctx(ctx, force=True, force_editor=False,
796 796 empty_ok=True, use_dirstate=False,
797 797 update_dirstate=False)
798 798 finally:
799 799 del lock, wlock
800 800
801 801 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
802 802 use_dirstate=True, update_dirstate=True):
803 803 tr = None
804 804 valid = 0 # don't save the dirstate if this isn't set
805 805 try:
806 806 commit = util.sort(wctx.modified() + wctx.added())
807 807 remove = wctx.removed()
808 808 extra = wctx.extra().copy()
809 809 branchname = extra['branch']
810 810 user = wctx.user()
811 811 text = wctx.description()
812 812
813 813 p1, p2 = [p.node() for p in wctx.parents()]
814 814 c1 = self.changelog.read(p1)
815 815 c2 = self.changelog.read(p2)
816 816 m1 = self.manifest.read(c1[0]).copy()
817 817 m2 = self.manifest.read(c2[0])
818 818
819 819 if use_dirstate:
820 820 oldname = c1[5].get("branch") # stored in UTF-8
821 821 if (not commit and not remove and not force and p2 == nullid
822 822 and branchname == oldname):
823 823 self.ui.status(_("nothing changed\n"))
824 824 return None
825 825
826 826 xp1 = hex(p1)
827 827 if p2 == nullid: xp2 = ''
828 828 else: xp2 = hex(p2)
829 829
830 830 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
831 831
832 832 tr = self.transaction()
833 833 trp = weakref.proxy(tr)
834 834
835 835 # check in files
836 836 new = {}
837 837 changed = []
838 838 linkrev = len(self)
839 839 for f in commit:
840 840 self.ui.note(f + "\n")
841 841 try:
842 842 fctx = wctx.filectx(f)
843 843 newflags = fctx.flags()
844 844 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
845 845 if ((not changed or changed[-1] != f) and
846 846 m2.get(f) != new[f]):
847 847 # mention the file in the changelog if some
848 848 # flag changed, even if there was no content
849 849 # change.
850 850 if m1.flags(f) != newflags:
851 851 changed.append(f)
852 852 m1.set(f, newflags)
853 853 if use_dirstate:
854 854 self.dirstate.normal(f)
855 855
856 856 except (OSError, IOError):
857 857 if use_dirstate:
858 858 self.ui.warn(_("trouble committing %s!\n") % f)
859 859 raise
860 860 else:
861 861 remove.append(f)
862 862
863 863 # update manifest
864 864 m1.update(new)
865 865 removed = []
866 866
867 867 for f in util.sort(remove):
868 868 if f in m1:
869 869 del m1[f]
870 870 removed.append(f)
871 871 elif f in m2:
872 872 removed.append(f)
873 873 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
874 874 (new, removed))
875 875
876 876 # add changeset
877 877 if (not empty_ok and not text) or force_editor:
878 878 edittext = []
879 879 if text:
880 880 edittext.append(text)
881 881 edittext.append("")
882 882 edittext.append(_("HG: Enter commit message."
883 883 " Lines beginning with 'HG:' are removed."))
884 884 edittext.append("HG: --")
885 885 edittext.append("HG: user: %s" % user)
886 886 if p2 != nullid:
887 887 edittext.append("HG: branch merge")
888 888 if branchname:
889 889 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
890 890 edittext.extend(["HG: changed %s" % f for f in changed])
891 891 edittext.extend(["HG: removed %s" % f for f in removed])
892 892 if not changed and not remove:
893 893 edittext.append("HG: no files changed")
894 894 edittext.append("")
895 895 # run editor in the repository root
896 896 olddir = os.getcwd()
897 897 os.chdir(self.root)
898 898 text = self.ui.edit("\n".join(edittext), user)
899 899 os.chdir(olddir)
900 900
901 901 lines = [line.rstrip() for line in text.rstrip().splitlines()]
902 902 while lines and not lines[0]:
903 903 del lines[0]
904 904 if not lines and use_dirstate:
905 905 raise util.Abort(_("empty commit message"))
906 906 text = '\n'.join(lines)
907 907
908 908 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
909 909 user, wctx.date(), extra)
910 910 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
911 911 parent2=xp2)
912 912 tr.close()
913 913
914 914 if self.branchcache:
915 915 self.branchtags()
916 916
917 917 if use_dirstate or update_dirstate:
918 918 self.dirstate.setparents(n)
919 919 if use_dirstate:
920 920 for f in removed:
921 921 self.dirstate.forget(f)
922 922 valid = 1 # our dirstate updates are complete
923 923
924 924 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
925 925 return n
926 926 finally:
927 927 if not valid: # don't save our updated dirstate
928 928 self.dirstate.invalidate()
929 929 del tr
930 930
931 931 def walk(self, match, node=None):
932 932 '''
933 933 walk recursively through the directory tree or a given
934 934 changeset, finding all files matched by the match
935 935 function
936 936 '''
937 937 return self[node].walk(match)
938 938
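Usage sketch, with the always-matcher this module already imports as match_:

    m = match_.always(repo.root, repo.getcwd())
    for f in repo.walk(m):              # working directory
        print f
    for f in repo.walk(m, node='tip'):  # a specific changeset
        print f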
939 939 def status(self, node1='.', node2=None, match=None,
940 940 ignored=False, clean=False, unknown=False):
941 941 """return status of files between two nodes or node and working directory
942 942
943 943 If node1 is None, use the first dirstate parent instead.
944 944 If node2 is None, compare node1 with working directory.
945 945 """
946 946
947 947 def mfmatches(ctx):
948 948 mf = ctx.manifest().copy()
949 949 for fn in mf.keys():
950 950 if not match(fn):
951 951 del mf[fn]
952 952 return mf
953 953
954 954 ctx1 = self[node1]
955 955 ctx2 = self[node2]
956 956 working = ctx2 == self[None]
957 957 parentworking = working and ctx1 == self['.']
958 958 match = match or match_.always(self.root, self.getcwd())
959 959 listignored, listclean, listunknown = ignored, clean, unknown
960 960
961 961 if working: # we need to scan the working dir
962 962 s = self.dirstate.status(match, listignored, listclean, listunknown)
963 963 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
964 964
965 965 # check for any possibly clean files
966 966 if parentworking and cmp:
967 967 fixup = []
968 968 # do a full compare of any files that might have changed
969 969 for f in cmp:
970 970 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
971 971 or ctx1[f].cmp(ctx2[f].data())):
972 972 modified.append(f)
973 973 else:
974 974 fixup.append(f)
975 975
976 976 if listclean:
977 977 clean += fixup
978 978
979 979 # update dirstate for files that are actually clean
980 980 if fixup:
981 981 wlock = None
982 982 try:
983 983 try:
984 984 wlock = self.wlock(False)
985 985 for f in fixup:
986 986 self.dirstate.normal(f)
987 987 except lock.LockException:
988 988 pass
989 989 finally:
990 990 del wlock
991 991
992 992 if not parentworking:
993 993 mf1 = mfmatches(ctx1)
994 994 if working:
995 995 # we are comparing working dir against non-parent
996 996 # generate a pseudo-manifest for the working dir
997 997 mf2 = mfmatches(self['.'])
998 998 for f in cmp + modified + added:
999 999 mf2[f] = None
1000 1000 mf2.set(f, ctx2.flags(f))
1001 1001 for f in removed:
1002 1002 if f in mf2:
1003 1003 del mf2[f]
1004 1004 else:
1005 1005 # we are comparing two revisions
1006 1006 deleted, unknown, ignored = [], [], []
1007 1007 mf2 = mfmatches(ctx2)
1008 1008
1009 1009 modified, added, clean = [], [], []
1010 1010 for fn in mf2:
1011 1011 if fn in mf1:
1012 1012 if (mf1.flags(fn) != mf2.flags(fn) or
1013 1013 (mf1[fn] != mf2[fn] and
1014 1014 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1015 1015 modified.append(fn)
1016 1016 elif listclean:
1017 1017 clean.append(fn)
1018 1018 del mf1[fn]
1019 1019 else:
1020 1020 added.append(fn)
1021 1021 removed = mf1.keys()
1022 1022
1023 1023 r = modified, added, removed, deleted, unknown, ignored, clean
1024 1024 [l.sort() for l in r]
1025 1025 return r
1026 1026
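The seven result lists unpack in a fixed order; ignored, clean and unknown stay empty unless requested (a sketch):

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, clean=True, unknown=True)
    print '%d modified, %d clean' % (len(modified), len(clean))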
1027 1027 def add(self, list):
1028 1028 wlock = self.wlock()
1029 1029 try:
1030 1030 rejected = []
1031 1031 for f in list:
1032 1032 p = self.wjoin(f)
1033 1033 try:
1034 1034 st = os.lstat(p)
1035 1035 except:
1036 1036 self.ui.warn(_("%s does not exist!\n") % f)
1037 1037 rejected.append(f)
1038 1038 continue
1039 1039 if st.st_size > 10000000:
1040 1040 self.ui.warn(_("%s: files over 10MB may cause memory and"
1041 1041 " performance problems\n"
1042 1042 "(use 'hg revert %s' to unadd the file)\n")
1043 1043 % (f, f))
1044 1044 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1045 1045 self.ui.warn(_("%s not added: only files and symlinks "
1046 1046 "supported currently\n") % f)
1047 1047 rejected.append(p)
1048 1048 elif self.dirstate[f] in 'amn':
1049 1049 self.ui.warn(_("%s already tracked!\n") % f)
1050 1050 elif self.dirstate[f] == 'r':
1051 1051 self.dirstate.normallookup(f)
1052 1052 else:
1053 1053 self.dirstate.add(f)
1054 1054 return rejected
1055 1055 finally:
1056 1056 del wlock
1057 1057
1058 1058 def forget(self, list):
1059 1059 wlock = self.wlock()
1060 1060 try:
1061 1061 for f in list:
1062 1062 if self.dirstate[f] != 'a':
1063 1063 self.ui.warn(_("%s not added!\n") % f)
1064 1064 else:
1065 1065 self.dirstate.forget(f)
1066 1066 finally:
1067 1067 del wlock
1068 1068
1069 1069 def remove(self, list, unlink=False):
1070 1070 wlock = None
1071 1071 try:
1072 1072 if unlink:
1073 1073 for f in list:
1074 1074 try:
1075 1075 util.unlink(self.wjoin(f))
1076 1076 except OSError, inst:
1077 1077 if inst.errno != errno.ENOENT:
1078 1078 raise
1079 1079 wlock = self.wlock()
1080 1080 for f in list:
1081 1081 if unlink and os.path.exists(self.wjoin(f)):
1082 1082 self.ui.warn(_("%s still exists!\n") % f)
1083 1083 elif self.dirstate[f] == 'a':
1084 1084 self.dirstate.forget(f)
1085 1085 elif f not in self.dirstate:
1086 1086 self.ui.warn(_("%s not tracked!\n") % f)
1087 1087 else:
1088 1088 self.dirstate.remove(f)
1089 1089 finally:
1090 1090 del wlock
1091 1091
1092 1092 def undelete(self, list):
1093 1093 wlock = None
1094 1094 try:
1095 1095 manifests = [self.manifest.read(self.changelog.read(p)[0])
1096 1096 for p in self.dirstate.parents() if p != nullid]
1097 1097 wlock = self.wlock()
1098 1098 for f in list:
1099 1099 if self.dirstate[f] != 'r':
1100 1100 self.ui.warn("%s not removed!\n" % f)
1101 1101 else:
1102 1102 m = f in manifests[0] and manifests[0] or manifests[1]
1103 1103 t = self.file(f).read(m[f])
1104 1104 self.wwrite(f, t, m.flags(f))
1105 1105 self.dirstate.normal(f)
1106 1106 finally:
1107 1107 del wlock
1108 1108
1109 1109 def copy(self, source, dest):
1110 1110 wlock = None
1111 1111 try:
1112 1112 p = self.wjoin(dest)
1113 1113 if not (os.path.exists(p) or os.path.islink(p)):
1114 1114 self.ui.warn(_("%s does not exist!\n") % dest)
1115 1115 elif not (os.path.isfile(p) or os.path.islink(p)):
1116 1116 self.ui.warn(_("copy failed: %s is not a file or a "
1117 1117 "symbolic link\n") % dest)
1118 1118 else:
1119 1119 wlock = self.wlock()
1120 1120 if dest not in self.dirstate:
1121 1121 self.dirstate.add(dest)
1122 1122 self.dirstate.copy(source, dest)
1123 1123 finally:
1124 1124 del wlock
1125 1125
1126 1126 def heads(self, start=None):
1127 1127 heads = self.changelog.heads(start)
1128 1128 # sort the output in rev descending order
1129 1129 heads = [(-self.changelog.rev(h), h) for h in heads]
1130 1130 return [n for (r, n) in util.sort(heads)]
1131 1131
1132 1132 def branchheads(self, branch=None, start=None):
1133 1133 if branch is None:
1134 1134 branch = self[None].branch()
1135 1135 branches = self.branchtags()
1136 1136 if branch not in branches:
1137 1137 return []
1138 1138 # The basic algorithm is this:
1139 1139 #
1140 1140 # Start from the branch tip since there are no later revisions that can
1141 1141 # possibly be in this branch, and the tip is a guaranteed head.
1142 1142 #
1143 1143 # Remember the tip's parents as the first ancestors, since these by
1144 1144 # definition are not heads.
1145 1145 #
 1146 1146 # Step backwards from the branch tip through all the revisions. We are
1147 1147 # guaranteed by the rules of Mercurial that we will now be visiting the
1148 1148 # nodes in reverse topological order (children before parents).
1149 1149 #
1150 1150 # If a revision is one of the ancestors of a head then we can toss it
1151 1151 # out of the ancestors set (we've already found it and won't be
1152 1152 # visiting it again) and put its parents in the ancestors set.
1153 1153 #
1154 1154 # Otherwise, if a revision is in the branch it's another head, since it
1155 1155 # wasn't in the ancestor list of an existing head. So add it to the
1156 1156 # head list, and add its parents to the ancestor list.
1157 1157 #
1158 1158 # If it is not in the branch ignore it.
1159 1159 #
1160 1160 # Once we have a list of heads, use nodesbetween to filter out all the
1161 1161 # heads that cannot be reached from startrev. There may be a more
1162 1162 # efficient way to do this as part of the previous algorithm.
1163 1163
1164 1164 set = util.set
1165 1165 heads = [self.changelog.rev(branches[branch])]
1166 1166 # Don't care if ancestors contains nullrev or not.
1167 1167 ancestors = set(self.changelog.parentrevs(heads[0]))
1168 1168 for rev in xrange(heads[0] - 1, nullrev, -1):
1169 1169 if rev in ancestors:
1170 1170 ancestors.update(self.changelog.parentrevs(rev))
1171 1171 ancestors.remove(rev)
1172 1172 elif self[rev].branch() == branch:
1173 1173 heads.append(rev)
1174 1174 ancestors.update(self.changelog.parentrevs(rev))
1175 1175 heads = [self.changelog.node(rev) for rev in heads]
1176 1176 if start is not None:
1177 1177 heads = self.changelog.nodesbetween([start], heads)[2]
1178 1178 return heads
1179 1179
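Sketch: the heads of one named branch, found by the reverse-topological scan described above:

    from mercurial.node import hex
    for h in repo.branchheads('default'):
        print repo[h].rev(), hex(h)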
1180 1180 def branches(self, nodes):
1181 1181 if not nodes:
1182 1182 nodes = [self.changelog.tip()]
1183 1183 b = []
1184 1184 for n in nodes:
1185 1185 t = n
1186 1186 while 1:
1187 1187 p = self.changelog.parents(n)
1188 1188 if p[1] != nullid or p[0] == nullid:
1189 1189 b.append((t, n, p[0], p[1]))
1190 1190 break
1191 1191 n = p[0]
1192 1192 return b
1193 1193
1194 1194 def between(self, pairs):
1195 1195 r = []
1196 1196
1197 1197 for top, bottom in pairs:
1198 1198 n, l, i = top, [], 0
1199 1199 f = 1
1200 1200
1201 1201 while n != bottom:
1202 1202 p = self.changelog.parents(n)[0]
1203 1203 if i == f:
1204 1204 l.append(n)
1205 1205 f = f * 2
1206 1206 n = p
1207 1207 i += 1
1208 1208
1209 1209 r.append(l)
1210 1210
1211 1211 return r
1212 1212
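between() samples the first-parent chain from each top toward its bottom at exponentially growing distances (1, 2, 4, ...); findincoming below binary-searches these samples. For a hypothetical linear chain bottom <- e <- d <- c <- b <- a <- top:

    repo.between([(top, bottom)])   # -> [[a, b, d]]
    # a, b and d sit at distances 1, 2 and 4 from top; the next sample
    # (distance 8) is never reached because bottom comes first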
1213 1213 def findincoming(self, remote, base=None, heads=None, force=False):
1214 1214 """Return list of roots of the subsets of missing nodes from remote
1215 1215
1216 1216 If base dict is specified, assume that these nodes and their parents
1217 1217 exist on the remote side and that no child of a node of base exists
1218 1218 in both remote and self.
 1219 1219 Furthermore base will be updated to include the nodes that exist
 1220 1220 in self and remote but whose children do not exist in both self and remote.
1221 1221 If a list of heads is specified, return only nodes which are heads
1222 1222 or ancestors of these heads.
1223 1223
1224 1224 All the ancestors of base are in self and in remote.
1225 1225 All the descendants of the list returned are missing in self.
1226 1226 (and so we know that the rest of the nodes are missing in remote, see
1227 1227 outgoing)
1228 1228 """
1229 1229 m = self.changelog.nodemap
1230 1230 search = []
1231 1231 fetch = {}
1232 1232 seen = {}
1233 1233 seenbranch = {}
1234 1234 if base == None:
1235 1235 base = {}
1236 1236
1237 1237 if not heads:
1238 1238 heads = remote.heads()
1239 1239
1240 1240 if self.changelog.tip() == nullid:
1241 1241 base[nullid] = 1
1242 1242 if heads != [nullid]:
1243 1243 return [nullid]
1244 1244 return []
1245 1245
1246 1246 # assume we're closer to the tip than the root
1247 1247 # and start by examining the heads
1248 1248 self.ui.status(_("searching for changes\n"))
1249 1249
1250 1250 unknown = []
1251 1251 for h in heads:
1252 1252 if h not in m:
1253 1253 unknown.append(h)
1254 1254 else:
1255 1255 base[h] = 1
1256 1256
1257 1257 if not unknown:
1258 1258 return []
1259 1259
1260 1260 req = dict.fromkeys(unknown)
1261 1261 reqcnt = 0
1262 1262
1263 1263 # search through remote branches
1264 1264 # a 'branch' here is a linear segment of history, with four parts:
1265 1265 # head, root, first parent, second parent
1266 1266 # (a branch always has two parents (or none) by definition)
1267 1267 unknown = remote.branches(unknown)
1268 1268 while unknown:
1269 1269 r = []
1270 1270 while unknown:
1271 1271 n = unknown.pop(0)
1272 1272 if n[0] in seen:
1273 1273 continue
1274 1274
1275 1275 self.ui.debug(_("examining %s:%s\n")
1276 1276 % (short(n[0]), short(n[1])))
1277 1277 if n[0] == nullid: # found the end of the branch
1278 1278 pass
1279 1279 elif n in seenbranch:
1280 1280 self.ui.debug(_("branch already found\n"))
1281 1281 continue
1282 1282 elif n[1] and n[1] in m: # do we know the base?
1283 1283 self.ui.debug(_("found incomplete branch %s:%s\n")
1284 1284 % (short(n[0]), short(n[1])))
1285 1285 search.append(n) # schedule branch range for scanning
1286 1286 seenbranch[n] = 1
1287 1287 else:
1288 1288 if n[1] not in seen and n[1] not in fetch:
1289 1289 if n[2] in m and n[3] in m:
1290 1290 self.ui.debug(_("found new changeset %s\n") %
1291 1291 short(n[1]))
1292 1292 fetch[n[1]] = 1 # earliest unknown
1293 1293 for p in n[2:4]:
1294 1294 if p in m:
1295 1295 base[p] = 1 # latest known
1296 1296
1297 1297 for p in n[2:4]:
1298 1298 if p not in req and p not in m:
1299 1299 r.append(p)
1300 1300 req[p] = 1
1301 1301 seen[n[0]] = 1
1302 1302
1303 1303 if r:
1304 1304 reqcnt += 1
1305 1305 self.ui.debug(_("request %d: %s\n") %
1306 1306 (reqcnt, " ".join(map(short, r))))
1307 1307 for p in xrange(0, len(r), 10):
1308 1308 for b in remote.branches(r[p:p+10]):
1309 1309 self.ui.debug(_("received %s:%s\n") %
1310 1310 (short(b[0]), short(b[1])))
1311 1311 unknown.append(b)
1312 1312
1313 1313 # do binary search on the branches we found
1314 1314 while search:
1315 1315 n = search.pop(0)
1316 1316 reqcnt += 1
1317 1317 l = remote.between([(n[0], n[1])])[0]
1318 1318 l.append(n[1])
1319 1319 p = n[0]
1320 1320 f = 1
1321 1321 for i in l:
1322 1322 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1323 1323 if i in m:
1324 1324 if f <= 2:
1325 1325 self.ui.debug(_("found new branch changeset %s\n") %
1326 1326 short(p))
1327 1327 fetch[p] = 1
1328 1328 base[i] = 1
1329 1329 else:
1330 1330 self.ui.debug(_("narrowed branch search to %s:%s\n")
1331 1331 % (short(p), short(i)))
1332 1332 search.append((p, i))
1333 1333 break
1334 1334 p, f = i, f * 2
1335 1335
1336 1336 # sanity check our fetch list
1337 1337 for f in fetch.keys():
1338 1338 if f in m:
1339 1339 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1340 1340
1341 1341 if base.keys() == [nullid]:
1342 1342 if force:
1343 1343 self.ui.warn(_("warning: repository is unrelated\n"))
1344 1344 else:
1345 1345 raise util.Abort(_("repository is unrelated"))
1346 1346
1347 1347 self.ui.debug(_("found new changesets starting at ") +
1348 1348 " ".join([short(f) for f in fetch]) + "\n")
1349 1349
1350 1350 self.ui.debug(_("%d total queries\n") % reqcnt)
1351 1351
1352 1352 return fetch.keys()
1353 1353
1354 1354 def findoutgoing(self, remote, base=None, heads=None, force=False):
1355 1355 """Return list of nodes that are roots of subsets not in remote
1356 1356
1357 1357 If base dict is specified, assume that these nodes and their parents
1358 1358 exist on the remote side.
1359 1359 If a list of heads is specified, return only nodes which are heads
1360 1360 or ancestors of these heads, and return a second element which
1361 1361 contains all remote heads which get new children.
1362 1362 """
1363 1363 if base == None:
1364 1364 base = {}
1365 1365 self.findincoming(remote, base, heads, force=force)
1366 1366
1367 1367 self.ui.debug(_("common changesets up to ")
1368 1368 + " ".join(map(short, base.keys())) + "\n")
1369 1369
1370 1370 remain = dict.fromkeys(self.changelog.nodemap)
1371 1371
1372 1372 # prune everything remote has from the tree
1373 1373 del remain[nullid]
1374 1374 remove = base.keys()
1375 1375 while remove:
1376 1376 n = remove.pop(0)
1377 1377 if n in remain:
1378 1378 del remain[n]
1379 1379 for p in self.changelog.parents(n):
1380 1380 remove.append(p)
1381 1381
1382 1382 # find every node whose parents have been pruned
1383 1383 subset = []
1384 1384 # find every remote head that will get new children
1385 1385 updated_heads = {}
1386 1386 for n in remain:
1387 1387 p1, p2 = self.changelog.parents(n)
1388 1388 if p1 not in remain and p2 not in remain:
1389 1389 subset.append(n)
1390 1390 if heads:
1391 1391 if p1 in heads:
1392 1392 updated_heads[p1] = True
1393 1393 if p2 in heads:
1394 1394 updated_heads[p2] = True
1395 1395
1396 1396 # this is the set of all roots we have to push
1397 1397 if heads:
1398 1398 return subset, updated_heads.keys()
1399 1399 else:
1400 1400 return subset
1401 1401
1402 1402 def pull(self, remote, heads=None, force=False):
1403 1403 lock = self.lock()
1404 1404 try:
1405 1405 fetch = self.findincoming(remote, heads=heads, force=force)
1406 1406 if fetch == [nullid]:
1407 1407 self.ui.status(_("requesting all changes\n"))
1408 1408
1409 1409 if not fetch:
1410 1410 self.ui.status(_("no changes found\n"))
1411 1411 return 0
1412 1412
1413 1413 if heads is None:
1414 1414 cg = remote.changegroup(fetch, 'pull')
1415 1415 else:
1416 1416 if 'changegroupsubset' not in remote.capabilities:
1417 1417 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1418 1418 cg = remote.changegroupsubset(fetch, heads, 'pull')
1419 1419 return self.addchangegroup(cg, 'pull', remote.url())
1420 1420 finally:
1421 1421 del lock
1422 1422
1423 1423 def push(self, remote, force=False, revs=None):
1424 1424 # there are two ways to push to remote repo:
1425 1425 #
1426 1426 # addchangegroup assumes local user can lock remote
1427 1427 # repo (local filesystem, old ssh servers).
1428 1428 #
1429 1429 # unbundle assumes local user cannot lock remote repo (new ssh
1430 1430 # servers, http servers).
1431 1431
1432 1432 if remote.capable('unbundle'):
1433 1433 return self.push_unbundle(remote, force, revs)
1434 1434 return self.push_addchangegroup(remote, force, revs)
1435 1435
1436 1436 def prepush(self, remote, force, revs):
1437 1437 base = {}
1438 1438 remote_heads = remote.heads()
1439 1439 inc = self.findincoming(remote, base, remote_heads, force=force)
1440 1440
1441 1441 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1442 1442 if revs is not None:
1443 1443 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1444 1444 else:
1445 1445 bases, heads = update, self.changelog.heads()
1446 1446
1447 1447 if not bases:
1448 1448 self.ui.status(_("no changes found\n"))
1449 1449 return None, 1
1450 1450 elif not force:
1451 1451 # check if we're creating new remote heads
1452 1452 # to be a remote head after push, node must be either
1453 1453 # - unknown locally
1454 1454 # - a local outgoing head descended from update
1455 1455 # - a remote head that's known locally and not
1456 1456 # ancestral to an outgoing head
1457 1457
1458 1458 warn = 0
1459 1459
1460 1460 if remote_heads == [nullid]:
1461 1461 warn = 0
1462 1462 elif not revs and len(heads) > len(remote_heads):
1463 1463 warn = 1
1464 1464 else:
1465 1465 newheads = list(heads)
1466 1466 for r in remote_heads:
1467 1467 if r in self.changelog.nodemap:
1468 1468 desc = self.changelog.heads(r, heads)
1469 1469 l = [h for h in heads if h in desc]
1470 1470 if not l:
1471 1471 newheads.append(r)
1472 1472 else:
1473 1473 newheads.append(r)
1474 1474 if len(newheads) > len(remote_heads):
1475 1475 warn = 1
1476 1476
1477 1477 if warn:
1478 1478 self.ui.warn(_("abort: push creates new remote heads!\n"))
1479 1479 self.ui.status(_("(did you forget to merge?"
1480 1480 " use push -f to force)\n"))
1481 1481 return None, 0
1482 1482 elif inc:
1483 1483 self.ui.warn(_("note: unsynced remote changes!\n"))
1484 1484
1485 1485
1486 1486 if revs is None:
1487 1487 cg = self.changegroup(update, 'push')
1488 1488 else:
1489 1489 cg = self.changegroupsubset(update, revs, 'push')
1490 1490 return cg, remote_heads
1491 1491
1492 1492 def push_addchangegroup(self, remote, force, revs):
1493 1493 lock = remote.lock()
1494 1494 try:
1495 1495 ret = self.prepush(remote, force, revs)
1496 1496 if ret[0] is not None:
1497 1497 cg, remote_heads = ret
1498 1498 return remote.addchangegroup(cg, 'push', self.url())
1499 1499 return ret[1]
1500 1500 finally:
1501 1501 del lock
1502 1502
1503 1503 def push_unbundle(self, remote, force, revs):
1504 1504 # local repo finds heads on server, finds out what revs it
1505 1505 # must push. once revs transferred, if server finds it has
1506 1506 # different heads (someone else won commit/push race), server
1507 1507 # aborts.
1508 1508
1509 1509 ret = self.prepush(remote, force, revs)
1510 1510 if ret[0] is not None:
1511 1511 cg, remote_heads = ret
1512 1512 if force: remote_heads = ['force']
1513 1513 return remote.unbundle(cg, remote_heads, 'push')
1514 1514 return ret[1]
1515 1515
1516 1516 def changegroupinfo(self, nodes, source):
1517 1517 if self.ui.verbose or source == 'bundle':
1518 1518 self.ui.status(_("%d changesets found\n") % len(nodes))
1519 1519 if self.ui.debugflag:
1520 1520 self.ui.debug(_("List of changesets:\n"))
1521 1521 for node in nodes:
1522 1522 self.ui.debug("%s\n" % hex(node))
1523 1523
1524 1524 def changegroupsubset(self, bases, heads, source, extranodes=None):
1525 1525 """This function generates a changegroup consisting of all the nodes
 1526 1526 that are descendants of any of the bases, and ancestors of any of
1527 1527 the heads.
1528 1528
1529 1529 It is fairly complex as determining which filenodes and which
1530 1530 manifest nodes need to be included for the changeset to be complete
1531 1531 is non-trivial.
1532 1532
1533 1533 Another wrinkle is doing the reverse, figuring out which changeset in
1534 1534 the changegroup a particular filenode or manifestnode belongs to.
1535 1535
1536 1536 The caller can specify some nodes that must be included in the
1537 1537 changegroup using the extranodes argument. It should be a dict
1538 1538 where the keys are the filenames (or 1 for the manifest), and the
1539 1539 values are lists of (node, linknode) tuples, where node is a wanted
1540 1540 node and linknode is the changelog node that should be transmitted as
1541 1541 the linkrev.
1542 1542 """
1543 1543
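        # Editor's illustration with hypothetical values: extranodes maps a
        # file name, or 1 for the manifest, to (node, linknode) pairs, e.g.
        #   extranodes = {'foo.c': [(fnode, clnode)], 1: [(mnode, clnode)]}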
1544 1544 self.hook('preoutgoing', throw=True, source=source)
1545 1545
1546 1546 # Set up some initial variables
1547 1547 # Make it easy to refer to self.changelog
1548 1548 cl = self.changelog
1549 1549 # msng is short for missing - compute the list of changesets in this
1550 1550 # changegroup.
1551 1551 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1552 1552 self.changegroupinfo(msng_cl_lst, source)
1553 1553 # Some bases may turn out to be superfluous, and some heads may be
1554 1554 # too. nodesbetween will return the minimal set of bases and heads
1555 1555 # necessary to re-create the changegroup.
1556 1556
1557 1557 # Known heads are the list of heads that it is assumed the recipient
1558 1558 # of this changegroup will know about.
1559 1559 knownheads = {}
1560 1560 # We assume that all parents of bases are known heads.
1561 1561 for n in bases:
1562 1562 for p in cl.parents(n):
1563 1563 if p != nullid:
1564 1564 knownheads[p] = 1
1565 1565 knownheads = knownheads.keys()
1566 1566 if knownheads:
1567 1567 # Now that we know what heads are known, we can compute which
1568 1568 # changesets are known. The recipient must know about all
1569 1569 # changesets required to reach the known heads from the null
1570 1570 # changeset.
1571 1571 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1572 1572 junk = None
1573 1573 # Transform the list into an ersatz set.
1574 1574 has_cl_set = dict.fromkeys(has_cl_set)
1575 1575 else:
1576 1576 # If there were no known heads, the recipient cannot be assumed to
1577 1577 # know about any changesets.
1578 1578 has_cl_set = {}
1579 1579
1580 1580 # Make it easy to refer to self.manifest
1581 1581 mnfst = self.manifest
1582 1582 # We don't know which manifests are missing yet
1583 1583 msng_mnfst_set = {}
1584 1584 # Nor do we know which filenodes are missing.
1585 1585 msng_filenode_set = {}
1586 1586
1587 1587 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1588 1588 junk = None
1589 1589
1590 1590 # A changeset always belongs to itself, so the changenode lookup
1591 1591 # function for a changenode is identity.
1592 1592 def identity(x):
1593 1593 return x
1594 1594
1595 1595 # A function generating function. Sets up an environment for the
1596 1596 # inner function.
1597 1597 def cmp_by_rev_func(revlog):
1598 1598 # Compare two nodes by their revision number in the environment's
1599 1599 # revision history. Since the revision number both represents the
1600 1600 # most efficient order to read the nodes in, and represents a
1601 1601 # topological sorting of the nodes, this function is often useful.
1602 1602 def cmp_by_rev(a, b):
1603 1603 return cmp(revlog.rev(a), revlog.rev(b))
1604 1604 return cmp_by_rev
1605 1605
1606 1606 # If we determine that a particular file or manifest node must be a
1607 1607 # node that the recipient of the changegroup will already have, we can
1608 1608 # also assume the recipient will have all the parents. This function
1609 1609 # prunes them from the set of missing nodes.
1610 1610 def prune_parents(revlog, hasset, msngset):
1611 1611 haslst = hasset.keys()
1612 1612 haslst.sort(cmp_by_rev_func(revlog))
1613 1613 for node in haslst:
1614 1614 parentlst = [p for p in revlog.parents(node) if p != nullid]
1615 1615 while parentlst:
1616 1616 n = parentlst.pop()
1617 1617 if n not in hasset:
1618 1618 hasset[n] = 1
1619 1619 p = [p for p in revlog.parents(n) if p != nullid]
1620 1620 parentlst.extend(p)
1621 1621 for n in hasset:
1622 1622 msngset.pop(n, None)
1623 1623
1624 1624 # This is a function generating function used to set up an environment
1625 1625 # for the inner function to execute in.
1626 1626 def manifest_and_file_collector(changedfileset):
1627 1627 # This is an information gathering function that gathers
1628 1628 # information from each changeset node that goes out as part of
1629 1629 # the changegroup. The information gathered is a list of which
1630 1630 # manifest nodes are potentially required (the recipient may
1631 1631 # already have them) and total list of all files which were
1632 1632 # changed in any changeset in the changegroup.
1633 1633 #
1634 1634 # We also remember the first changenode we saw any manifest
1635 1635 # referenced by so we can later determine which changenode 'owns'
1636 1636 # the manifest.
1637 1637 def collect_manifests_and_files(clnode):
1638 1638 c = cl.read(clnode)
1639 1639 for f in c[3]:
1640 1640 # This is to make sure we only have one instance of each
1641 1641 # filename string for each filename.
1642 1642 changedfileset.setdefault(f, f)
1643 1643 msng_mnfst_set.setdefault(c[0], clnode)
1644 1644 return collect_manifests_and_files
1645 1645
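For orientation, the c[0]/c[3] indexing above relies on the tuple cl.read() returns; the layout below is my recollection of this era's changelog entry and should be treated as an assumption, as should the illustrative helper name:

    # cl.read(clnode) unpacks a changeset roughly as
    #   (manifest_node, user, (time, timezone), files, description, extra)
    # so c[0] is the manifest the changeset points at and c[3] is the
    # list of files it touched.
    def changeset_summary(cl, clnode):
        c = cl.read(clnode)
        return c[0], c[3]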
1646 1646 # Figure out which manifest nodes (of the ones we think might be part
1647 1647 # of the changegroup) the recipient must know about and remove them
1648 1648 # from the changegroup.
1649 1649 def prune_manifests():
1650 1650 has_mnfst_set = {}
1651 1651 for n in msng_mnfst_set:
1652 1652 # If a 'missing' manifest thinks it belongs to a changenode
1653 1653 # the recipient is assumed to have, obviously the recipient
1654 1654 # must have that manifest.
1655 1655 linknode = cl.node(mnfst.linkrev(n))
1656 1656 if linknode in has_cl_set:
1657 1657 has_mnfst_set[n] = 1
1658 1658 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1659 1659
1660 1660 # Use the information collected in collect_manifests_and_files to say
1661 1661 # which changenode any manifestnode belongs to.
1662 1662 def lookup_manifest_link(mnfstnode):
1663 1663 return msng_mnfst_set[mnfstnode]
1664 1664
1665 1665 # A function generating function that sets up the initial environment
1666 1666 # for the inner function.
1667 1667 def filenode_collector(changedfiles):
1668 1668 next_rev = [0]
1669 1669 # This gathers information from each manifestnode included in the
1670 1670 # changegroup about which filenodes the manifest node references
1671 1671 # so we can include those in the changegroup too.
1672 1672 #
1673 1673 # It also remembers which changenode each filenode belongs to. It
1674 1674 # does this by assuming that a filenode belongs to the changenode
1675 1675 # that the first manifest referencing it belongs to.
1676 1676 def collect_msng_filenodes(mnfstnode):
1677 1677 r = mnfst.rev(mnfstnode)
1678 1678 if r == next_rev[0]:
1679 1679 # If the last rev we looked at was the one just previous,
1680 1680 # we only need to see a diff.
1681 1681 deltamf = mnfst.readdelta(mnfstnode)
1682 1682 # For each line in the delta
1683 1683 for f, fnode in deltamf.items():
1684 1684 f = changedfiles.get(f, None)
1685 1685 # And if the file is in the list of files we care
1686 1686 # about.
1687 1687 if f is not None:
1688 1688 # Get the changenode this manifest belongs to
1689 1689 clnode = msng_mnfst_set[mnfstnode]
1690 1690 # Create the set of filenodes for the file if
1691 1691 # there isn't one already.
1692 1692 ndset = msng_filenode_set.setdefault(f, {})
1693 1693 # And set the filenode's changelog node to the
1694 1694 # manifest's if it hasn't been set already.
1695 1695 ndset.setdefault(fnode, clnode)
1696 1696 else:
1697 1697 # Otherwise we need a full manifest.
1698 1698 m = mnfst.read(mnfstnode)
1699 1699 # For every file we care about.
1700 1700 for f in changedfiles:
1701 1701 fnode = m.get(f, None)
1702 1702 # If it's in the manifest
1703 1703 if fnode is not None:
1704 1704 # See comments above.
1705 1705 clnode = msng_mnfst_set[mnfstnode]
1706 1706 ndset = msng_filenode_set.setdefault(f, {})
1707 1707 ndset.setdefault(fnode, clnode)
1708 1708 # Remember the revision we hope to see next.
1709 1709 next_rev[0] = r + 1
1710 1710 return collect_msng_filenodes
1711 1711
1712 1712 # We have a list of filenodes we think we need for a file, let's
1713 1713 # remove all those we know the recipient must have.
1714 1714 def prune_filenodes(f, filerevlog):
1715 1715 msngset = msng_filenode_set[f]
1716 1716 hasset = {}
1717 1717 # If a 'missing' filenode thinks it belongs to a changenode we
1718 1718 # assume the recipient must have, then the recipient must have
1719 1719 # that filenode.
1720 1720 for n in msngset:
1721 1721 clnode = cl.node(filerevlog.linkrev(n))
1722 1722 if clnode in has_cl_set:
1723 1723 hasset[n] = 1
1724 1724 prune_parents(filerevlog, hasset, msngset)
1725 1725
1726 1726 # A function generating function that sets up a context for the
1727 1727 # inner function.
1728 1728 def lookup_filenode_link_func(fname):
1729 1729 msngset = msng_filenode_set[fname]
1730 1730 # Lookup the changenode the filenode belongs to.
1731 1731 def lookup_filenode_link(fnode):
1732 1732 return msngset[fnode]
1733 1733 return lookup_filenode_link
1734 1734
1735 1735 # Add the nodes that were explicitly requested.
1736 1736 def add_extra_nodes(name, nodes):
1737 1737 if not extranodes or name not in extranodes:
1738 1738 return
1739 1739
1740 1740 for node, linknode in extranodes[name]:
1741 1741 if node not in nodes:
1742 1742 nodes[node] = linknode
1743 1743
1744 1744 # Now that we have all these utility functions to help out and
1745 1745 # logically divide up the task, generate the group.
1746 1746 def gengroup():
1747 1747 # The set of changed files starts empty.
1748 1748 changedfiles = {}
1749 1749 # Create a changenode group generator that will call our functions
1750 1750 # back to lookup the owning changenode and collect information.
1751 1751 group = cl.group(msng_cl_lst, identity,
1752 1752 manifest_and_file_collector(changedfiles))
1753 1753 for chnk in group:
1754 1754 yield chnk
1755 1755
1756 1756 # The list of manifests has been collected by the generator
1757 1757 # calling our functions back.
1758 1758 prune_manifests()
1759 1759 add_extra_nodes(1, msng_mnfst_set)
1760 1760 msng_mnfst_lst = msng_mnfst_set.keys()
1761 1761 # Sort the manifestnodes by revision number.
1762 1762 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1763 1763 # Create a generator for the manifestnodes that calls our lookup
1764 1764 # and data collection functions back.
1765 1765 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1766 1766 filenode_collector(changedfiles))
1767 1767 for chnk in group:
1768 1768 yield chnk
1769 1769
1770 1770 # These are no longer needed, dereference and toss the memory for
1771 1771 # them.
1772 1772 msng_mnfst_lst = None
1773 1773 msng_mnfst_set.clear()
1774 1774
1775 1775 if extranodes:
1776 1776 for fname in extranodes:
1777 1777 if isinstance(fname, int):
1778 1778 continue
1779 1779 add_extra_nodes(fname,
1780 1780 msng_filenode_set.setdefault(fname, {}))
1781 1781 changedfiles[fname] = 1
1782 1782 # Go through all our files in order sorted by name.
1783 1783 for fname in util.sort(changedfiles):
1784 1784 filerevlog = self.file(fname)
1785 1785 if not len(filerevlog):
1786 1786 raise util.Abort(_("empty or missing revlog for %s") % fname)
1787 1787 # Toss out the filenodes that the recipient isn't really
1788 1788 # missing.
1789 1789 if fname in msng_filenode_set:
1790 1790 prune_filenodes(fname, filerevlog)
1791 1791 msng_filenode_lst = msng_filenode_set[fname].keys()
1792 1792 else:
1793 1793 msng_filenode_lst = []
1794 1794 # If any filenodes are left, generate the group for them,
1795 1795 # otherwise don't bother.
1796 1796 if len(msng_filenode_lst) > 0:
1797 1797 yield changegroup.chunkheader(len(fname))
1798 1798 yield fname
1799 1799 # Sort the filenodes by their revision #
1800 1800 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1801 1801 # Create a group generator and only pass in a changenode
1802 1802 # lookup function as we need to collect no information
1803 1803 # from filenodes.
1804 1804 group = filerevlog.group(msng_filenode_lst,
1805 1805 lookup_filenode_link_func(fname))
1806 1806 for chnk in group:
1807 1807 yield chnk
1808 1808 if fname in msng_filenode_set:
1809 1809 # Don't need this anymore, toss it to free memory.
1810 1810 del msng_filenode_set[fname]
1811 1811 # Signal that no more groups are left.
1812 1812 yield changegroup.closechunk()
1813 1813
1814 1814 if msng_cl_lst:
1815 1815 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1816 1816
1817 1817 return util.chunkbuffer(gengroup())
1818 1818
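The generator above emits framed chunks: chunkheader(n) writes a 4-byte big-endian length that counts itself plus n payload bytes, and closechunk() writes a zero length marking the end of a group. A hedged reader sketch mirroring that framing (illustration only, not Mercurial's API):

    import struct

    def read_group(fp):
        # Yield the payload of each chunk until a terminating closechunk.
        while True:
            header = fp.read(4)
            l = struct.unpack(">l", header)[0]
            if l <= 4:
                break  # zero-length chunk: end of this group
            yield fp.read(l - 4)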
1819 1819 def changegroup(self, basenodes, source):
1820 1820 """Generate a changegroup of all nodes that we have that a recipient
1821 1821 doesn't.
1822 1822
1823 1823 This is much easier than the previous function as we can assume that
1824 1824 the recipient has any changenode we aren't sending them."""
1825 1825
1826 1826 self.hook('preoutgoing', throw=True, source=source)
1827 1827
1828 1828 cl = self.changelog
1829 1829 nodes = cl.nodesbetween(basenodes, None)[0]
1830 1830 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1831 1831 self.changegroupinfo(nodes, source)
1832 1832
1833 1833 def identity(x):
1834 1834 return x
1835 1835
1836 1836 def gennodelst(log):
1837 1837 for r in log:
1838 1838 n = log.node(r)
1839 1839 if log.linkrev(n) in revset:
1840 1840 yield n
1841 1841
1842 1842 def changed_file_collector(changedfileset):
1843 1843 def collect_changed_files(clnode):
1844 1844 c = cl.read(clnode)
1845 1845 for fname in c[3]:
1846 1846 changedfileset[fname] = 1
1847 1847 return collect_changed_files
1848 1848
1849 1849 def lookuprevlink_func(revlog):
1850 1850 def lookuprevlink(n):
1851 1851 return cl.node(revlog.linkrev(n))
1852 1852 return lookuprevlink
1853 1853
1854 1854 def gengroup():
1855 1855 # construct a list of all changed files
1856 1856 changedfiles = {}
1857 1857
1858 1858 for chnk in cl.group(nodes, identity,
1859 1859 changed_file_collector(changedfiles)):
1860 1860 yield chnk
1861 1861
1862 1862 mnfst = self.manifest
1863 1863 nodeiter = gennodelst(mnfst)
1864 1864 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1865 1865 yield chnk
1866 1866
1867 1867 for fname in util.sort(changedfiles):
1868 1868 filerevlog = self.file(fname)
1869 1869 if not len(filerevlog):
1870 1870 raise util.Abort(_("empty or missing revlog for %s") % fname)
1871 1871 nodeiter = gennodelst(filerevlog)
1872 1872 nodeiter = list(nodeiter)
1873 1873 if nodeiter:
1874 1874 yield changegroup.chunkheader(len(fname))
1875 1875 yield fname
1876 1876 lookup = lookuprevlink_func(filerevlog)
1877 1877 for chnk in filerevlog.group(nodeiter, lookup):
1878 1878 yield chnk
1879 1879
1880 1880 yield changegroup.closechunk()
1881 1881
1882 1882 if nodes:
1883 1883 self.hook('outgoing', node=hex(nodes[0]), source=source)
1884 1884
1885 1885 return util.chunkbuffer(gengroup())
1886 1886
1887 1887 def addchangegroup(self, source, srctype, url, emptyok=False):
1888 1888 """add changegroup to repo.
1889 1889
1890 1890 return values:
1891 1891 - nothing changed or no source: 0
1892 1892 - more heads than before: 1+added heads (2..n)
1893 1893 - fewer heads than before: -1-removed heads (-2..-n)
1894 1894 - number of heads stays the same: 1
1895 1895 """
1896 1896 def csmap(x):
1897 1897 self.ui.debug(_("add changeset %s\n") % short(x))
1898 1898 return len(cl)
1899 1899
1900 1900 def revmap(x):
1901 1901 return cl.rev(x)
1902 1902
1903 1903 if not source:
1904 1904 return 0
1905 1905
1906 1906 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1907 1907
1908 1908 changesets = files = revisions = 0
1909 1909
1910 1910 # write changelog data to temp files so concurrent readers will not see
1911 1911 # inconsistent view
1912 1912 cl = self.changelog
1913 1913 cl.delayupdate()
1914 1914 oldheads = len(cl.heads())
1915 1915
1916 1916 tr = self.transaction()
1917 1917 try:
1918 1918 trp = weakref.proxy(tr)
1919 1919 # pull off the changeset group
1920 1920 self.ui.status(_("adding changesets\n"))
1921 1921 cor = len(cl) - 1
1922 1922 chunkiter = changegroup.chunkiter(source)
1923 1923 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1924 1924 raise util.Abort(_("received changelog group is empty"))
1925 1925 cnr = len(cl) - 1
1926 1926 changesets = cnr - cor
1927 1927
1928 1928 # pull off the manifest group
1929 1929 self.ui.status(_("adding manifests\n"))
1930 1930 chunkiter = changegroup.chunkiter(source)
1931 1931 # no need to check for empty manifest group here:
1932 1932 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1933 1933 # no new manifest will be created and the manifest group will
1934 1934 # be empty during the pull
1935 1935 self.manifest.addgroup(chunkiter, revmap, trp)
1936 1936
1937 1937 # process the files
1938 1938 self.ui.status(_("adding file changes\n"))
1939 1939 while 1:
1940 1940 f = changegroup.getchunk(source)
1941 1941 if not f:
1942 1942 break
1943 1943 self.ui.debug(_("adding %s revisions\n") % f)
1944 1944 fl = self.file(f)
1945 1945 o = len(fl)
1946 1946 chunkiter = changegroup.chunkiter(source)
1947 1947 if fl.addgroup(chunkiter, revmap, trp) is None:
1948 1948 raise util.Abort(_("received file revlog group is empty"))
1949 1949 revisions += len(fl) - o
1950 1950 files += 1
1951 1951
1952 1952 # make changelog see real files again
1953 1953 cl.finalize(trp)
1954 1954
1955 1955 newheads = len(self.changelog.heads())
1956 1956 heads = ""
1957 1957 if oldheads and newheads != oldheads:
1958 1958 heads = _(" (%+d heads)") % (newheads - oldheads)
1959 1959
1960 1960 self.ui.status(_("added %d changesets"
1961 1961 " with %d changes to %d files%s\n")
1962 1962 % (changesets, revisions, files, heads))
1963 1963
1964 1964 if changesets > 0:
1965 1965 self.hook('pretxnchangegroup', throw=True,
1966 1966 node=hex(self.changelog.node(cor+1)), source=srctype,
1967 1967 url=url)
1968 1968
1969 1969 tr.close()
1970 1970 finally:
1971 1971 del tr
1972 1972
1973 1973 if changesets > 0:
1974 1974 # forcefully update the on-disk branch cache
1975 1975 self.ui.debug(_("updating the branch cache\n"))
1976 1976 self.branchtags()
1977 1977 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1978 1978 source=srctype, url=url)
1979 1979
1980 1980 for i in xrange(cor + 1, cnr + 1):
1981 1981 self.hook("incoming", node=hex(self.changelog.node(i)),
1982 1982 source=srctype, url=url)
1983 1983
1984 1984 # never return 0 here:
1985 1985 if newheads < oldheads:
1986 1986 return newheads - oldheads - 1
1987 1987 else:
1988 1988 return newheads - oldheads + 1
1989 1989
1990 1990
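A hypothetical caller decoding the return-value contract documented in the docstring above (sketch only):

    def describe_addchangegroup(r):
        # 0: nothing changed; 1: head count unchanged; 2..n: heads added;
        # -2..-n: heads removed (encoding per the docstring above).
        if r == 0:
            return "no changes"
        if r == 1:
            return "changesets added, head count unchanged"
        if r > 1:
            return "%d head(s) added" % (r - 1)
        return "%d head(s) removed" % (-r - 1)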
1991 1991 def stream_in(self, remote):
1992 1992 fp = remote.stream_out()
1993 1993 l = fp.readline()
1994 1994 try:
1995 1995 resp = int(l)
1996 1996 except ValueError:
1997 1997 raise util.UnexpectedOutput(
1998 1998 _('Unexpected response from remote server:'), l)
1999 1999 if resp == 1:
2000 2000 raise util.Abort(_('operation forbidden by server'))
2001 2001 elif resp == 2:
2002 2002 raise util.Abort(_('locking the remote repository failed'))
2003 2003 elif resp != 0:
2004 2004 raise util.Abort(_('the server sent an unknown error code'))
2005 2005 self.ui.status(_('streaming all changes\n'))
2006 2006 l = fp.readline()
2007 2007 try:
2008 2008 total_files, total_bytes = map(int, l.split(' ', 1))
2009 2009 except (ValueError, TypeError):
2010 2010 raise util.UnexpectedOutput(
2011 2011 _('Unexpected response from remote server:'), l)
2012 2012 self.ui.status(_('%d files to transfer, %s of data\n') %
2013 2013 (total_files, util.bytecount(total_bytes)))
2014 2014 start = time.time()
2015 2015 for i in xrange(total_files):
2016 2016 # XXX doesn't support '\n' or '\r' in filenames
2017 2017 l = fp.readline()
2018 2018 try:
2019 2019 name, size = l.split('\0', 1)
2020 2020 size = int(size)
2021 2021 except (ValueError, TypeError):
2022 2022 raise util.UnexpectedOutput(
2023 2023 _('Unexpected response from remote server:'), l)
2024 2024 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2025 2025 ofp = self.sopener(name, 'w')
2026 2026 for chunk in util.filechunkiter(fp, limit=size):
2027 2027 ofp.write(chunk)
2028 2028 ofp.close()
2029 2029 elapsed = time.time() - start
2030 2030 if elapsed <= 0:
2031 2031 elapsed = 0.001
2032 2032 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2033 2033 (util.bytecount(total_bytes), elapsed,
2034 2034 util.bytecount(total_bytes / elapsed)))
2035 2035 self.invalidate()
2036 2036 return len(self.heads()) + 1
2037 2037
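Read back-to-back, the parser above implies a simple wire format: a numeric status line, a "file-count total-bytes" line, then for each file a "name\0size" header line followed by exactly size raw bytes. A producer sketch consistent with that reader (hypothetical helper, not the server's actual implementation):

    def emit_stream(files):
        # files: list of (name, data) pairs
        total = sum(len(data) for name, data in files)
        yield "0\n"                                # 0 == success
        yield "%d %d\n" % (len(files), total)
        for name, data in files:
            yield "%s\0%d\n" % (name, len(data))   # no '\n'/'\r' in names
            yield data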
2038 2038 def clone(self, remote, heads=[], stream=False):
2039 2039 '''clone remote repository.
2040 2040
2041 2041 keyword arguments:
2042 2042 heads: list of revs to clone (forces use of pull)
2043 2043 stream: use streaming clone if possible'''
2044 2044
2045 2045 # now, all clients that can request uncompressed clones can
2046 2046 # read repo formats supported by all servers that can serve
2047 2047 # them.
2048 2048
2049 2049 # if revlog format changes, client will have to check version
2050 2050 # and format flags on "stream" capability, and use
2051 2051 # uncompressed only if compatible.
2052 2052
2053 2053 if stream and not heads and remote.capable('stream'):
2054 2054 return self.stream_in(remote)
2055 2055 return self.pull(remote, heads)
2056 2056
2057 2057 def storefiles(self):
2058 2058 '''get all *.i and *.d files in the store
2059 2059
2060 2060 Returns (list of (filename, size), total_bytes)'''
2061 2061
2062 2062 lock = None
2063 2063 try:
2064 2064 self.ui.debug('scanning\n')
2065 2065 entries = []
2066 2066 total_bytes = 0
2067 2067 # get consistent snapshot of repo, lock during scan
2068 2068 lock = self.lock()
2069 for name, size in self.store.walk():
2069 for name, ename, size in self.store.walk():
2070 2070 entries.append((name, size))
2071 2071 total_bytes += size
2072 2072 return entries, total_bytes
2073 2073 finally:
2074 2074 del lock
2075 2075
2076 2076 # used to avoid circular references so destructors work
2077 2077 def aftertrans(files):
2078 2078 renamefiles = [tuple(t) for t in files]
2079 2079 def a():
2080 2080 for src, dest in renamefiles:
2081 2081 util.rename(src, dest)
2082 2082 return a
2083 2083
2084 2084 def instance(ui, path, create):
2085 2085 return localrepository(ui, util.drop_scheme('file', path), create)
2086 2086
2087 2087 def islocal(path):
2088 2088 return True
@@ -1,117 +1,116 b''
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 from i18n import _
9 8 import os, stat, osutil, util
10 9
11 10 def _buildencodefun():
12 11 e = '_'
13 12 win_reserved = [ord(x) for x in '\\:*?"<>|']
14 13 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
15 14 for x in (range(32) + range(126, 256) + win_reserved):
16 15 cmap[chr(x)] = "~%02x" % x
17 16 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
18 17 cmap[chr(x)] = e + chr(x).lower()
19 18 dmap = {}
20 19 for k, v in cmap.iteritems():
21 20 dmap[v] = k
22 21 def decode(s):
23 22 i = 0
24 23 while i < len(s):
25 24 for l in xrange(1, 4):
26 25 try:
27 26 yield dmap[s[i:i+l]]
28 27 i += l
29 28 break
30 29 except KeyError:
31 30 pass
32 31 else:
33 32 raise KeyError
34 33 return (lambda s: "".join([cmap[c] for c in s]),
35 34 lambda s: "".join(list(decode(s))))
36 35
37 36 encodefilename, decodefilename = _buildencodefun()
38 37
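A few round-trips through the encoder defined above illustrate the scheme: uppercase letters and '_' get the '_'-prefix treatment, while Windows-reserved and non-ASCII bytes become '~%02x' escapes (expected values derived from the table-building code):

    assert encodefilename("data/FOO.i") == "data/_f_o_o.i"
    assert encodefilename("data/mod_x.d") == "data/mod__x.d"
    assert encodefilename("data/a:b.i") == "data/a~3ab.i"
    assert decodefilename("data/_f_o_o.i") == "data/FOO.i"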
39 38 def _calcmode(path):
40 39 try:
41 40 # files in .hg/ will be created using this mode
42 41 mode = os.stat(path).st_mode
43 42 # avoid some useless chmods
44 43 if (0777 & ~util._umask) == (0777 & mode):
45 44 mode = None
46 45 except OSError:
47 46 mode = None
48 47 return mode
49 48
50 49 class basicstore:
51 50 '''base class for local repository stores'''
52 51 def __init__(self, path, opener):
53 52 self.path = path
54 53 self.createmode = _calcmode(path)
55 54 self.opener = opener(self.path)
56 55 self.opener.createmode = self.createmode
57 56
58 57 def join(self, f):
59 58 return os.path.join(self.path, f)
60 59
61 60 def _walk(self, relpath, recurse):
62 '''yields (filename, size)'''
61 '''yields (unencoded, encoded, size)'''
63 62 path = os.path.join(self.path, relpath)
64 63 striplen = len(self.path) + len(os.sep)
65 64 prefix = path[striplen:]
66 65 l = []
67 66 if os.path.isdir(path):
68 67 visit = [path]
69 68 while visit:
70 69 p = visit.pop()
71 70 for f, kind, st in osutil.listdir(p, stat=True):
72 71 fp = os.path.join(p, f)
73 72 if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
74 l.append((util.pconvert(fp[striplen:]), st.st_size))
73 n = util.pconvert(fp[striplen:])
74 l.append((n, n, st.st_size))
75 75 elif kind == stat.S_IFDIR and recurse:
76 76 visit.append(fp)
77 77 return util.sort(l)
78 78
79 def datafiles(self, reporterror=None):
79 def datafiles(self):
80 80 return self._walk('data', True)
81 81
82 82 def walk(self):
83 '''yields (direncoded filename, size)'''
83 '''yields (unencoded, encoded, size)'''
84 84 # yield data files first
85 85 for x in self.datafiles():
86 86 yield x
87 87 # yield manifest before changelog
88 88 meta = self._walk('', False)
89 89 meta.reverse()
90 90 for x in meta:
91 91 yield x
92 92
93 93 class encodedstore(basicstore):
94 94 def __init__(self, path, opener):
95 95 self.path = os.path.join(path, 'store')
96 96 self.createmode = _calcmode(self.path)
97 97 self.encodefn = encodefilename
98 98 op = opener(self.path)
99 99 op.createmode = self.createmode
100 100 self.opener = lambda f, *args, **kw: op(self.encodefn(f), *args, **kw)
101 101
102 def datafiles(self, reporterror=None):
103 for f, size in self._walk('data', True):
102 def datafiles(self):
103 for a, b, size in self._walk('data', True):
104 104 try:
105 yield decodefilename(f), size
105 a = decodefilename(a)
106 106 except KeyError:
107 if not reporterror:
108 raise
109 reporterror(_("cannot decode filename '%s'") % f)
107 a = None
108 yield a, b, size
110 109
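This is the heart of the change: instead of a reporterror callback, decode failures are now signalled in-band by yielding None for the unencoded name, leaving the reporting policy to the caller. A hypothetical consumer (illustration only; verify.py below does essentially this):

    def check_store_names(store, report):
        for name, encname, size in store.datafiles():
            if name is None:
                report("cannot decode filename '%s'" % encname)
            elif size > 0:
                report("%s (%d bytes)" % (name, size))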
111 110 def join(self, f):
112 111 return os.path.join(self.path, self.encodefn(f))
113 112
114 113 def store(requirements, path, opener):
115 114 if 'store' in requirements:
116 115 return encodedstore(path, opener)
117 116 return basicstore(path, opener)
@@ -1,233 +1,235 b''
1 1 # verify.py - repository integrity checking for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import revlog, util
11 11
12 12 def verify(repo):
13 13 lock = repo.lock()
14 14 try:
15 15 return _verify(repo)
16 16 finally:
17 17 del lock
18 18
19 19 def _verify(repo):
20 20 mflinkrevs = {}
21 21 filelinkrevs = {}
22 22 filenodes = {}
23 23 revisions = 0
24 24 badrevs = {}
25 25 errors = [0]
26 26 warnings = [0]
27 27 ui = repo.ui
28 28 cl = repo.changelog
29 29 mf = repo.manifest
30 30
31 31 def err(linkrev, msg, filename=None):
32 32 if linkrev != None:
33 33 badrevs[linkrev] = True
34 34 else:
35 35 linkrev = '?'
36 36 msg = "%s: %s" % (linkrev, msg)
37 37 if filename:
38 38 msg = "%s@%s" % (filename, msg)
39 39 ui.warn(" " + msg + "\n")
40 40 errors[0] += 1
41 41
42 42 def exc(linkrev, msg, inst, filename=None):
43 43 if isinstance(inst, KeyboardInterrupt):
44 44 ui.warn(_("interrupted"))
45 45 raise
46 46 err(linkrev, "%s: %s" % (msg, inst), filename)
47 47
48 48 def warn(msg):
49 49 ui.warn(msg + "\n")
50 50 warnings[0] += 1
51 51
52 52 def checklog(obj, name):
53 53 if not len(obj) and (havecl or havemf):
54 54 err(0, _("empty or missing %s") % name)
55 55 return
56 56
57 57 d = obj.checksize()
58 58 if d[0]:
59 59 err(None, _("data length off by %d bytes") % d[0], name)
60 60 if d[1]:
61 61 err(None, _("index contains %d extra bytes") % d[1], name)
62 62
63 63 if obj.version != revlog.REVLOGV0:
64 64 if not revlogv1:
65 65 warn(_("warning: `%s' uses revlog format 1") % name)
66 66 elif revlogv1:
67 67 warn(_("warning: `%s' uses revlog format 0") % name)
68 68
69 69 def checkentry(obj, i, node, seen, linkrevs, f):
70 70 lr = obj.linkrev(node)
71 71 if lr < 0 or (havecl and lr not in linkrevs):
72 72 t = "unexpected"
73 73 if lr < 0 or lr >= len(cl):
74 74 t = "nonexistent"
75 75 err(None, _("rev %d point to %s changeset %d") % (i, t, lr), f)
76 76 if linkrevs:
77 77 warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
78 78 lr = None # can't be trusted
79 79
80 80 try:
81 81 p1, p2 = obj.parents(node)
82 82 if p1 not in seen and p1 != nullid:
83 83 err(lr, _("unknown parent 1 %s of %s") %
84 84 (short(p1), short(node)), f)
85 85 if p2 not in seen and p2 != nullid:
86 86 err(lr, _("unknown parent 2 %s of %s") %
87 87 (short(p2), short(node)), f)
88 88 except Exception, inst:
89 89 exc(lr, _("checking parents of %s") % short(node), inst, f)
90 90
91 91 if node in seen:
92 92 err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f)
93 93 seen[n] = i
94 94 return lr
95 95
96 96 revlogv1 = cl.version != revlog.REVLOGV0
97 97 if ui.verbose or not revlogv1:
98 98 ui.status(_("repository uses revlog format %d\n") %
99 99 (revlogv1 and 1 or 0))
100 100
101 101 havecl = len(cl) > 0
102 102 havemf = len(mf) > 0
103 103
104 104 ui.status(_("checking changesets\n"))
105 105 seen = {}
106 106 checklog(cl, "changelog")
107 107 for i in repo:
108 108 n = cl.node(i)
109 109 checkentry(cl, i, n, seen, [i], "changelog")
110 110
111 111 try:
112 112 changes = cl.read(n)
113 113 mflinkrevs.setdefault(changes[0], []).append(i)
114 114 for f in changes[3]:
115 115 filelinkrevs.setdefault(f, []).append(i)
116 116 except Exception, inst:
117 117 exc(i, _("unpacking changeset %s") % short(n), inst)
118 118
119 119 ui.status(_("checking manifests\n"))
120 120 seen = {}
121 121 checklog(mf, "manifest")
122 122 for i in mf:
123 123 n = mf.node(i)
124 124 lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
125 125 if n in mflinkrevs:
126 126 del mflinkrevs[n]
127 127
128 128 try:
129 129 for f, fn in mf.readdelta(n).iteritems():
130 130 if not f:
131 131 err(lr, _("file without name in manifest"))
132 132 elif f != "/dev/null":
133 133 fns = filenodes.setdefault(f, {})
134 134 if fn not in fns:
135 135 fns[fn] = n
136 136 except Exception, inst:
137 137 exc(lr, _("reading manifest delta %s") % short(n), inst)
138 138
139 139 ui.status(_("crosschecking files in changesets and manifests\n"))
140 140
141 141 if havemf:
142 142 for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
143 143 err(c, _("changeset refers to unknown manifest %s") % short(m))
144 144 del mflinkrevs
145 145
146 146 for f in util.sort(filelinkrevs):
147 147 if f not in filenodes:
148 148 lr = filelinkrevs[f][0]
149 149 err(lr, _("in changeset but not in manifest"), f)
150 150
151 151 if havecl:
152 152 for f in util.sort(filenodes):
153 153 if f not in filelinkrevs:
154 154 try:
155 155 lr = min([repo.file(f).linkrev(n) for n in filenodes[f]])
156 156 except:
157 157 lr = None
158 158 err(lr, _("in manifest but not in changeset"), f)
159 159
160 160 ui.status(_("checking files\n"))
161 161
162 storefiles = {}
163 for f, size in repo.store.datafiles(lambda m: err(None, m)):
164 if size > 0:
162 storefiles = {}
163 for f, f2, size in repo.store.datafiles():
164 if not f:
165 err(None, _("cannot decode filename '%s'") % f2)
166 elif size > 0:
165 167 storefiles[f] = True
166 168
167 169 files = util.sort(util.unique(filenodes.keys() + filelinkrevs.keys()))
168 170 for f in files:
169 171 fl = repo.file(f)
170 172
171 for ff in fl.files():
173 for ff in fl.files():
172 174 try:
173 175 del storefiles[ff]
174 176 except KeyError:
175 177 err(0, _("missing revlog!"), ff)
176 178
177 179 checklog(fl, f)
178 180 seen = {}
179 181 for i in fl:
180 182 revisions += 1
181 183 n = fl.node(i)
182 184 lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
183 185 if f in filenodes:
184 186 if havemf and n not in filenodes[f]:
185 187 err(lr, _("%s not in manifests") % (short(n)), f)
186 188 else:
187 189 del filenodes[f][n]
188 190
189 191 # verify contents
190 192 try:
191 193 t = fl.read(n)
192 194 rp = fl.renamed(n)
193 195 if len(t) != fl.size(i):
194 196 if not fl._readmeta(n): # ancient copy?
195 197 err(lr, _("unpacked size is %s, %s expected") %
196 198 (len(t), fl.size(i)), f)
197 199 except Exception, inst:
198 200 exc(lr, _("unpacking %s") % short(n), inst, f)
199 201
200 202 # check renames
201 203 try:
202 204 if rp:
203 205 fl2 = repo.file(rp[0])
204 206 if not len(fl2):
205 207 err(lr, _("empty or missing copy source revlog %s:%s")
206 208 % (rp[0], short(rp[1])), f)
207 209 elif rp[1] == nullid:
208 210 warn(_("warning: %s@%s: copy source revision is nullid %s:%s")
209 211 % (f, lr, rp[0], short(rp[1])))
210 212 else:
211 213 rev = fl2.rev(rp[1])
212 214 except Exception, inst:
213 215 exc(lr, _("checking rename of %s") % short(n), inst, f)
214 216
215 217 # cross-check
216 218 if f in filenodes:
217 219 fns = [(mf.linkrev(l), n) for n,l in filenodes[f].items()]
218 220 for lr, node in util.sort(fns):
219 221 err(lr, _("%s in manifests not found") % short(node), f)
220 222
221 223 for f in storefiles:
222 224 warn(_("warning: orphan revlog '%s'") % f)
223 225
224 226 ui.status(_("%d files, %d changesets, %d total revisions\n") %
225 227 (len(files), len(cl), revisions))
226 228 if warnings[0]:
227 229 ui.warn(_("%d warnings encountered!\n") % warnings[0])
228 230 if errors[0]:
229 231 ui.warn(_("%d integrity errors encountered!\n") % errors[0])
230 232 if badrevs:
231 233 ui.warn(_("(first damaged changeset appears to be %d)\n")
232 234 % min(badrevs))
233 235 return 1