Fix braindamage in repo.changes...
mpm@selenic.com
r548:e2e963e2 default
@@ -1,1523 +1,1523 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff")
14 14
15 15 class filelog(revlog):
16 16 def __init__(self, opener, path):
17 17 revlog.__init__(self, opener,
18 18 os.path.join("data", path + ".i"),
19 19 os.path.join("data", path + ".d"))
20 20
21 21 def read(self, node):
22 22 t = self.revision(node)
23 23 if t[:2] != '\1\n':
24 24 return t
25 25 s = t.find('\1\n', 2)
26 26 return t[s+2:]
27 27
28 28 def readmeta(self, node):
29 29 t = self.revision(node)
30 30 if t[:2] != '\1\n':
31 31 return t
32 32 s = t.find('\1\n', 2)
33 33 mt, m = t[2:s], {}
34 34 for l in mt.splitlines():
35 35 k, v = l.split(": ", 1)
36 36 m[k] = v
37 37 return m
38 38
39 39 def add(self, text, meta, transaction, link, p1=None, p2=None):
40 40 if meta or text[:2] == '\1\n':
41 41 mt = ""
42 42 if meta:
43 43 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
44 44 text = "\1\n" + "".join(mt) + "\1\n" + text
45 45 return self.addrevision(text, transaction, link, p1, p2)
46 46
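read(), readmeta() and add() above define the filelog metadata envelope: when a revision carries metadata (for example the copy/copyrev keys written by commit() further down), the stored text is "\1\n", the "key: value" lines, another "\1\n", and then the raw file contents. A minimal round-trip sketch of that framing, with hypothetical pack/unpack helper names, in modern Python:

    def pack_filelog(text, meta):
        # Prepend the "\1\n ... \1\n" envelope only when needed, mirroring add().
        if meta or text[:2] == b'\x01\n':
            lines = b"".join(b"%s: %s\n" % (k, v) for k, v in sorted(meta.items()))
            return b"\x01\n" + lines + b"\x01\n" + text
        return text

    def unpack_filelog(data):
        # Split the envelope back into (metadata dict, file text), as read()/readmeta() do.
        if data[:2] != b'\x01\n':
            return {}, data
        end = data.index(b'\x01\n', 2)
        meta = {}
        for line in data[2:end].splitlines():
            k, v = line.split(b": ", 1)
            meta[k] = v
        return meta, data[end + 2:]

    # round trip
    meta, text = unpack_filelog(pack_filelog(b"print('hi')\n", {b"copy": b"old.py"}))
    assert meta == {b"copy": b"old.py"} and text == b"print('hi')\n"
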
47 47 def annotate(self, node):
48 48
49 49 def decorate(text, rev):
50 50 return ([rev] * len(text.splitlines()), text)
51 51
52 52 def pair(parent, child):
53 53 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
54 54 child[0][b1:b2] = parent[0][a1:a2]
55 55 return child
56 56
57 57 # find all ancestors
58 58 needed = {node:1}
59 59 visit = [node]
60 60 while visit:
61 61 n = visit.pop(0)
62 62 for p in self.parents(n):
63 63 if p not in needed:
64 64 needed[p] = 1
65 65 visit.append(p)
66 66 else:
67 67 # count how many times we'll use this
68 68 needed[p] += 1
69 69
70 70 # sort by revision which is a topological order
71 71 visit = [ (self.rev(n), n) for n in needed.keys() ]
72 72 visit.sort()
73 73 hist = {}
74 74
75 75 for r,n in visit:
76 76 curr = decorate(self.read(n), self.linkrev(n))
77 77 for p in self.parents(n):
78 78 if p != nullid:
79 79 curr = pair(hist[p], curr)
80 80 # trim the history of unneeded revs
81 81 needed[p] -= 1
82 82 if not needed[p]:
83 83 del hist[p]
84 84 hist[n] = curr
85 85
86 86 return zip(hist[n][0], hist[n][1].splitlines(1))
87 87
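annotate() decorates each ancestor's text with one linkrev per line and then, walking revisions in topological (revision-number) order, copies annotations across the line ranges that bdiff.blocks() reports as unchanged between parent and child; the needed refcounts let hist entries be dropped once their last descendant has consumed them. A self-contained sketch of decorate/pair, with difflib standing in for bdiff (an assumption: difflib's matching blocks play the role of bdiff.blocks()):

    import difflib

    def decorate(text, rev):
        # Tag every line of this revision's text with the revision being examined.
        return [rev] * len(text.splitlines()), text

    def pair(parent, child):
        # For every block of lines the child shares with the parent, the
        # parent's annotations win.
        prevs, ptext = parent
        crevs, ctext = child
        sm = difflib.SequenceMatcher(None, ptext.splitlines(), ctext.splitlines())
        for a1, a2, b1, b2 in ((m.a, m.a + m.size, m.b, m.b + m.size)
                               for m in sm.get_matching_blocks() if m.size):
            crevs[b1:b2] = prevs[a1:a2]
        return crevs, ctext

    # linear history: rev 0 writes two lines, rev 1 appends a third
    h0 = decorate("alpha\nbeta\n", 0)
    h1 = pair(h0, decorate("alpha\nbeta\ngamma\n", 1))
    assert h1[0] == [0, 0, 1]
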
88 88 class manifest(revlog):
89 89 def __init__(self, opener):
90 90 self.mapcache = None
91 91 self.listcache = None
92 92 self.addlist = None
93 93 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
94 94
95 95 def read(self, node):
96 96 if node == nullid: return {} # don't upset local cache
97 97 if self.mapcache and self.mapcache[0] == node:
98 98 return self.mapcache[1].copy()
99 99 text = self.revision(node)
100 100 map = {}
101 101 flag = {}
102 102 self.listcache = (text, text.splitlines(1))
103 103 for l in self.listcache[1]:
104 104 (f, n) = l.split('\0')
105 105 map[f] = bin(n[:40])
106 106 flag[f] = (n[40:-1] == "x")
107 107 self.mapcache = (node, map, flag)
108 108 return map
109 109
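Each manifest revision is newline-separated records of the form "path\0<40-hex filenode>[x]", where a trailing "x" before the newline carries the executable bit; read() builds the path-to-node map and readflags() the path-to-flag map from the same cached parse. A small standalone parser in the same shape (hypothetical helper name; unhexlify stands in for revlog's bin()):

    from binascii import unhexlify

    def parse_manifest(text):
        # Each line is "<path>\0<40-hex node>[x]\n"; the optional "x" before the
        # newline is the executable flag, exactly as read()/readflags() decode it.
        nodes, flags = {}, {}
        for line in text.splitlines(True):          # keep line endings, like splitlines(1)
            path, rest = line.split('\0')
            nodes[path] = unhexlify(rest[:40])
            flags[path] = (rest[40:-1] == "x")
        return nodes, flags

    sample = "a.txt\0" + "11" * 20 + "\n" + "run.sh\0" + "22" * 20 + "x\n"
    nodes, flags = parse_manifest(sample)
    assert flags == {"a.txt": False, "run.sh": True} and len(nodes["a.txt"]) == 20
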
110 110 def readflags(self, node):
111 111 if node == nullid: return {} # don't upset local cache
112 112 if not self.mapcache or self.mapcache[0] != node:
113 113 self.read(node)
114 114 return self.mapcache[2]
115 115
116 116 def diff(self, a, b):
117 117 # this is sneaky, as we're not actually using a and b
118 118 if self.listcache and self.addlist and self.listcache[0] == a:
119 119 d = mdiff.diff(self.listcache[1], self.addlist, 1)
120 120 if mdiff.patch(a, d) != b:
121 121 sys.stderr.write("*** sortdiff failed, falling back ***\n")
122 122 return mdiff.textdiff(a, b)
123 123 return d
124 124 else:
125 125 return mdiff.textdiff(a, b)
126 126
127 127 def add(self, map, flags, transaction, link, p1=None, p2=None):
128 128 files = map.keys()
129 129 files.sort()
130 130
131 131 self.addlist = ["%s\000%s%s\n" %
132 132 (f, hex(map[f]), flags[f] and "x" or '')
133 133 for f in files]
134 134 text = "".join(self.addlist)
135 135
136 136 n = self.addrevision(text, transaction, link, p1, p2)
137 137 self.mapcache = (n, map, flags)
138 138 self.listcache = (text, self.addlist)
139 139 self.addlist = None
140 140
141 141 return n
142 142
143 143 class changelog(revlog):
144 144 def __init__(self, opener):
145 145 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
146 146
147 147 def extract(self, text):
148 148 if not text:
149 149 return (nullid, "", "0", [], "")
150 150 last = text.index("\n\n")
151 151 desc = text[last + 2:]
152 152 l = text[:last].splitlines()
153 153 manifest = bin(l[0])
154 154 user = l[1]
155 155 date = l[2]
156 156 files = l[3:]
157 157 return (manifest, user, date, files, desc)
158 158
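A changeset is stored as plain text: the manifest node in hex on the first line, the committer, the date as "unixtime timezone-offset", one changed file per line, a blank line, and then the free-form description; extract() and add() are the two directions of that layout. A hedged round-trip sketch with hypothetical pack/unpack names:

    from binascii import hexlify, unhexlify

    def pack_changeset(manifest, user, date, files, desc):
        # Mirrors changelog.add(): header lines, the sorted file list, a blank
        # separator line, then the description.
        return "\n".join([hexlify(manifest).decode(), user, date] + sorted(files) + ["", desc])

    def unpack_changeset(text):
        # Mirrors changelog.extract(): everything after the first blank line is
        # the description; the first three header lines are manifest/user/date.
        header, desc = text.split("\n\n", 1)
        lines = header.splitlines()
        return unhexlify(lines[0]), lines[1], lines[2], lines[3:], desc

    m = b"\x11" * 20
    text = pack_changeset(m, "mpm@selenic.com", "1118193600 0", ["hg.py"], "Fix repo.changes\n")
    assert unpack_changeset(text)[0] == m and unpack_changeset(text)[3] == ["hg.py"]
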
159 159 def read(self, node):
160 160 return self.extract(self.revision(node))
161 161
162 162 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
163 163 user=None, date=None):
164 164 user = (user or
165 165 os.environ.get("HGUSER") or
166 166 os.environ.get("EMAIL") or
167 167 (os.environ.get("LOGNAME",
168 168 os.environ.get("USERNAME", "unknown"))
169 169 + '@' + socket.getfqdn()))
170 170 date = date or "%d %d" % (time.time(), time.timezone)
171 171 list.sort()
172 172 l = [hex(manifest), user, date] + list + ["", desc]
173 173 text = "\n".join(l)
174 174 return self.addrevision(text, transaction, self.count(), p1, p2)
175 175
176 176 class dirstate:
177 177 def __init__(self, opener, ui, root):
178 178 self.opener = opener
179 179 self.root = root
180 180 self.dirty = 0
181 181 self.ui = ui
182 182 self.map = None
183 183 self.pl = None
184 184 self.copies = {}
185 185
186 186 def __del__(self):
187 187 if self.dirty:
188 188 self.write()
189 189
190 190 def __getitem__(self, key):
191 191 try:
192 192 return self.map[key]
193 193 except TypeError:
194 194 self.read()
195 195 return self[key]
196 196
197 197 def __contains__(self, key):
198 198 if not self.map: self.read()
199 199 return key in self.map
200 200
201 201 def parents(self):
202 202 if not self.pl:
203 203 self.read()
204 204 return self.pl
205 205
206 206 def setparents(self, p1, p2 = nullid):
207 207 self.dirty = 1
208 208 self.pl = p1, p2
209 209
210 210 def state(self, key):
211 211 try:
212 212 return self[key][0]
213 213 except KeyError:
214 214 return "?"
215 215
216 216 def read(self):
217 217 if self.map is not None: return self.map
218 218
219 219 self.map = {}
220 220 self.pl = [nullid, nullid]
221 221 try:
222 222 st = self.opener("dirstate").read()
223 223 if not st: return
224 224 except: return
225 225
226 226 self.pl = [st[:20], st[20: 40]]
227 227
228 228 pos = 40
229 229 while pos < len(st):
230 230 e = struct.unpack(">cllll", st[pos:pos+17])
231 231 l = e[4]
232 232 pos += 17
233 233 f = st[pos:pos + l]
234 234 if '\0' in f:
235 235 f, c = f.split('\0')
236 236 self.copies[f] = c
237 237 self.map[f] = e[:4]
238 238 pos += l
239 239
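On disk the dirstate is the two 20-byte parent nodes followed by one entry per tracked file: a fixed 17-byte header (">cllll": state character, mode, size, mtime, name length) trailed by the file name, with any copy source smuggled into the name after a NUL; read() and write() above are the two directions. A standalone sketch of the same framing (hypothetical helper names, modern Python):

    import struct

    def pack_dirstate_entry(state, mode, size, mtime, path, copied=None):
        # write() layout: ">cllll" header, then the name, with "\0<copy source>"
        # folded into the name field when a copy was recorded.
        name = (path + ("\0" + copied if copied else "")).encode()
        return struct.pack(">cllll", state.encode(), mode, size, mtime, len(name)) + name

    def unpack_dirstate(data):
        # read() layout: 2 x 20-byte parent nodes, then packed entries.
        parents = (data[:20], data[20:40])
        pos, entries, copies = 40, {}, {}
        while pos < len(data):
            state, mode, size, mtime, length = struct.unpack(">cllll", data[pos:pos + 17])
            pos += 17
            name = data[pos:pos + length].decode()
            pos += length
            if "\0" in name:
                name, source = name.split("\0")
                copies[name] = source
            entries[name] = (state.decode(), mode, size, mtime)
        return parents, entries, copies

    blob = b"\0" * 40 + pack_dirstate_entry("n", 0o644, 12, 1118193600, "hg.py")
    assert unpack_dirstate(blob)[1]["hg.py"][0] == "n"
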
240 240 def copy(self, source, dest):
241 241 self.read()
242 242 self.dirty = 1
243 243 self.copies[dest] = source
244 244
245 245 def copied(self, file):
246 246 return self.copies.get(file, None)
247 247
248 248 def update(self, files, state):
249 249 ''' current states:
250 250 n normal
251 251 m needs merging
252 252 r marked for removal
253 253 a marked for addition'''
254 254
255 255 if not files: return
256 256 self.read()
257 257 self.dirty = 1
258 258 for f in files:
259 259 if state == "r":
260 260 self.map[f] = ('r', 0, 0, 0)
261 261 else:
262 262 s = os.stat(os.path.join(self.root, f))
263 263 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
264 264
265 265 def forget(self, files):
266 266 if not files: return
267 267 self.read()
268 268 self.dirty = 1
269 269 for f in files:
270 270 try:
271 271 del self.map[f]
272 272 except KeyError:
273 273 self.ui.warn("not in dirstate: %s!\n" % f)
274 274 pass
275 275
276 276 def clear(self):
277 277 self.map = {}
278 278 self.dirty = 1
279 279
280 280 def write(self):
281 281 st = self.opener("dirstate", "w")
282 282 st.write("".join(self.pl))
283 283 for f, e in self.map.items():
284 284 c = self.copied(f)
285 285 if c:
286 286 f = f + "\0" + c
287 287 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
288 288 st.write(e + f)
289 289 self.dirty = 0
290 290
291 291 def changes(self, files, ignore):
292 292 self.read()
293 293 dc = self.map.copy()
294 294 lookup, changed, added, unknown = [], [], [], []
295 295
296 296 # compare all files by default
297 297 if not files: files = [self.root]
298 298
299 299 def uniq(g):
300 300 seen = {}
301 301 for f in g:
302 302 if f not in seen:
303 303 seen[f] = 1
304 304 yield f
305 305
306 306 # recursive generator of all files listed
307 307 def walk(files):
308 308 for f in uniq(files):
309 309 f = os.path.join(self.root, f)
310 310 if os.path.isdir(f):
311 311 for dir, subdirs, fl in os.walk(f):
312 312 d = dir[len(self.root) + 1:]
313 313 if ".hg" in subdirs: subdirs.remove(".hg")
314 314 for fn in fl:
315 315 fn = util.pconvert(os.path.join(d, fn))
316 316 yield fn
317 317 else:
318 318 yield f[len(self.root) + 1:]
319 319
320 320 for fn in uniq(walk(files)):
321 321 try: s = os.stat(os.path.join(self.root, fn))
322 322 except: continue
323 323
324 324 if fn in dc:
325 325 c = dc[fn]
326 326 del dc[fn]
327 327
328 328 if c[0] == 'm':
329 329 changed.append(fn)
330 330 elif c[0] == 'a':
331 331 added.append(fn)
332 332 elif c[0] == 'r':
333 333 unknown.append(fn)
334 334 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
335 335 changed.append(fn)
336 336 elif c[1] != s.st_mode or c[3] != s.st_mtime:
337 337 lookup.append(fn)
338 338 else:
339 339 if not ignore(fn): unknown.append(fn)
340 340
341 341 return (lookup, changed, added, dc.keys(), unknown)
342 342
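changes() buckets every file the walk finds: records marked 'm' or 'a' go straight to changed/added, 'r' entries that are still on disk surface as unknown, a size or exec-bit difference means definitely changed, a mere mode/mtime difference only earns a spot in lookup (the caller must compare contents), and anything not in the dirstate is unknown unless ignored; whatever is left in dc at the end is the deleted list. The per-file decision, written as a pure function for illustration (hypothetical name):

    def classify(entry, st_mode, st_size, st_mtime):
        # Pure version of the per-file decision in dirstate.changes() for a
        # tracked file that still exists on disk.
        state, mode, size, mtime = entry
        if state == 'm':
            return "changed"
        if state == 'a':
            return "added"
        if state == 'r':
            return "unknown"              # marked removed but still present
        if size != st_size or (mode ^ st_mode) & 0o100:
            return "changed"              # size or exec bit differs: definitely modified
        if mode != st_mode or mtime != st_mtime:
            return "lookup"               # stat is ambiguous: needs a content compare
        return "clean"

    assert classify(('n', 0o100644, 10, 100), 0o100644, 11, 100) == "changed"
    assert classify(('n', 0o100644, 10, 100), 0o100644, 10, 999) == "lookup"
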
343 343 # used to avoid circular references so destructors work
344 344 def opener(base):
345 345 p = base
346 346 def o(path, mode="r"):
347 347 if p[:7] == "http://":
348 348 f = os.path.join(p, urllib.quote(path))
349 349 return httprangereader.httprangereader(f)
350 350
351 351 f = os.path.join(p, path)
352 352
353 353 mode += "b" # for that other OS
354 354
355 355 if mode[0] != "r":
356 356 try:
357 357 s = os.stat(f)
358 358 except OSError:
359 359 d = os.path.dirname(f)
360 360 if not os.path.isdir(d):
361 361 os.makedirs(d)
362 362 else:
363 363 if s.st_nlink > 1:
364 364 file(f + ".tmp", "wb").write(file(f, "rb").read())
365 365 util.rename(f+".tmp", f)
366 366
367 367 return file(f, mode)
368 368
369 369 return o
370 370
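The opener is a small closure factory: HTTP paths go through httprangereader, while local writes first create any missing directories and then break hard links by copying the file aside and renaming it back, so clones that share revlog files by hard link never see each other's writes. The link-breaking step in isolation, as a sketch (shutil and os.rename stand in for the util.rename used above):

    import os, shutil

    def breakhardlink(path):
        # Same copy-and-rename trick as the opener: if the file is hard linked
        # elsewhere (st_nlink > 1), replace it with a private copy before writing.
        if os.path.exists(path) and os.stat(path).st_nlink > 1:
            tmp = path + ".tmp"
            shutil.copyfile(path, tmp)
            os.rename(tmp, path)

    # usage: breakhardlink("data/foo.i"); only then open "data/foo.i" for writing
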
371 371 class RepoError(Exception): pass
372 372
373 373 class localrepository:
374 374 def __init__(self, ui, path=None, create=0):
375 375 self.remote = 0
376 376 if path and path[:7] == "http://":
377 377 self.remote = 1
378 378 self.path = path
379 379 else:
380 380 if not path:
381 381 p = os.getcwd()
382 382 while not os.path.isdir(os.path.join(p, ".hg")):
383 383 oldp = p
384 384 p = os.path.dirname(p)
385 385 if p == oldp: raise RepoError("no repo found")
386 386 path = p
387 387 self.path = os.path.join(path, ".hg")
388 388
389 389 if not create and not os.path.isdir(self.path):
390 390 raise RepoError("repository %s not found" % self.path)
391 391
392 392 self.root = path
393 393 self.ui = ui
394 394
395 395 if create:
396 396 os.mkdir(self.path)
397 397 os.mkdir(self.join("data"))
398 398
399 399 self.opener = opener(self.path)
400 400 self.wopener = opener(self.root)
401 401 self.manifest = manifest(self.opener)
402 402 self.changelog = changelog(self.opener)
403 403 self.ignorelist = None
404 404 self.tagscache = None
405 405 self.nodetagscache = None
406 406
407 407 if not self.remote:
408 408 self.dirstate = dirstate(self.opener, ui, self.root)
409 409 try:
410 410 self.ui.readconfig(self.opener("hgrc"))
411 411 except IOError: pass
412 412
413 413 def ignore(self, f):
414 414 if self.ignorelist is None:
415 415 self.ignorelist = []
416 416 try:
417 417 l = file(self.wjoin(".hgignore"))
418 418 for pat in l:
419 419 if pat != "\n":
420 420 self.ignorelist.append(re.compile(util.pconvert(pat[:-1])))
421 421 except IOError: pass
422 422 for pat in self.ignorelist:
423 423 if pat.search(f): return True
424 424 return False
425 425
426 426 def hook(self, name, **args):
427 427 s = self.ui.config("hooks", name)
428 428 if s:
429 429 self.ui.note("running hook %s: %s\n" % (name, s))
430 430 old = {}
431 431 for k, v in args.items():
432 432 k = k.upper()
433 433 old[k] = os.environ.get(k, None)
434 434 os.environ[k] = v
435 435
436 436 r = os.system(s)
437 437
438 438 for k, v in old.items():
439 439 if v != None:
440 440 os.environ[k] = v
441 441 else:
442 442 del os.environ[k]
443 443
444 444 if r:
445 445 self.ui.warn("abort: %s hook failed with status %d!\n" %
446 446 (name, r))
447 447 return False
448 448 return True
449 449
450 450 def tags(self):
451 451 '''return a mapping of tag to node'''
452 452 if not self.tagscache:
453 453 self.tagscache = {}
454 454 try:
455 455 # read each head of the tags file, ending with the tip
456 456 # and add each tag found to the map, with "newer" ones
457 457 # taking precedence
458 458 fl = self.file(".hgtags")
459 459 h = fl.heads()
460 460 h.reverse()
461 461 for r in h:
462 462 for l in fl.revision(r).splitlines():
463 463 if l:
464 464 n, k = l.split(" ", 1)
465 465 try:
466 466 bin_n = bin(n)
467 467 except TypeError:
468 468 bin_n = ''
469 469 self.tagscache[k.strip()] = bin_n
470 470 except KeyError:
471 471 pass
472 472 for k, n in self.ui.configitems("tags"):
473 473 try:
474 474 bin_n = bin(n)
475 475 except TypeError:
476 476 bin_n = ''
477 477 self.tagscache[k] = bin_n
478 478
479 479 self.tagscache['tip'] = self.changelog.tip()
480 480
481 481 return self.tagscache
482 482
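Tags live in an ordinary versioned file, .hgtags, one "<40-hex node> <tag>" pair per line; tags() folds in every head of that file (newest last, so newer entries win), then [tags] items from the config, and finally forces 'tip' to the changelog tip. Parsing one revision of .hgtags, sketched with a hypothetical helper (unhexlify standing in for bin()):

    from binascii import unhexlify

    def parse_hgtags(text, tagmap=None):
        # Each line is "<40-hex node> <tag name>"; later lines override earlier
        # ones, and unparsable nodes are kept as empty values, as above.
        tagmap = {} if tagmap is None else tagmap
        for line in text.splitlines():
            if not line:
                continue
            node, name = line.split(" ", 1)
            try:
                binnode = unhexlify(node)
            except (TypeError, ValueError):
                binnode = b""
            tagmap[name.strip()] = binnode
        return tagmap

    tags = parse_hgtags(("11" * 20) + " v0.5\n" + ("22" * 20) + " v0.5\n")
    assert tags["v0.5"] == b"\x22" * 20     # the newer entry wins
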
483 483 def tagslist(self):
484 484 '''return a list of tags ordered by revision'''
485 485 l = []
486 486 for t, n in self.tags().items():
487 487 try:
488 488 r = self.changelog.rev(n)
489 489 except:
490 490 r = -2 # sort to the beginning of the list if unknown
491 491 l.append((r,t,n))
492 492 l.sort()
493 493 return [(t,n) for r,t,n in l]
494 494
495 495 def nodetags(self, node):
496 496 '''return the tags associated with a node'''
497 497 if not self.nodetagscache:
498 498 self.nodetagscache = {}
499 499 for t,n in self.tags().items():
500 500 self.nodetagscache.setdefault(n,[]).append(t)
501 501 return self.nodetagscache.get(node, [])
502 502
503 503 def lookup(self, key):
504 504 try:
505 505 return self.tags()[key]
506 506 except KeyError:
507 507 return self.changelog.lookup(key)
508 508
509 509 def join(self, f):
510 510 return os.path.join(self.path, f)
511 511
512 512 def wjoin(self, f):
513 513 return os.path.join(self.root, f)
514 514
515 515 def file(self, f):
516 516 if f[0] == '/': f = f[1:]
517 517 return filelog(self.opener, f)
518 518
519 519 def wfile(self, f, mode='r'):
520 520 return self.wopener(f, mode)
521 521
522 522 def transaction(self):
523 523 # save dirstate for undo
524 524 try:
525 525 ds = self.opener("dirstate").read()
526 526 except IOError:
527 527 ds = ""
528 528 self.opener("undo.dirstate", "w").write(ds)
529 529
530 530 return transaction.transaction(self.opener, self.join("journal"),
531 531 self.join("undo"))
532 532
533 533 def recover(self):
534 534 lock = self.lock()
535 535 if os.path.exists(self.join("recover")):
536 536 self.ui.status("rolling back interrupted transaction\n")
537 537 return transaction.rollback(self.opener, self.join("recover"))
538 538 else:
539 539 self.ui.warn("no interrupted transaction available\n")
540 540
541 541 def undo(self):
542 542 lock = self.lock()
543 543 if os.path.exists(self.join("undo")):
544 544 self.ui.status("rolling back last transaction\n")
545 545 transaction.rollback(self.opener, self.join("undo"))
546 546 self.dirstate = None
547 547 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
548 548 self.dirstate = dirstate(self.opener, self.ui, self.root)
549 549 else:
550 550 self.ui.warn("no undo information available\n")
551 551
552 552 def lock(self, wait = 1):
553 553 try:
554 554 return lock.lock(self.join("lock"), 0)
555 555 except lock.LockHeld, inst:
556 556 if wait:
557 557 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
558 558 return lock.lock(self.join("lock"), wait)
559 559 raise inst
560 560
561 561 def rawcommit(self, files, text, user, date, p1=None, p2=None):
562 562 orig_parent = self.dirstate.parents()[0] or nullid
563 563 p1 = p1 or self.dirstate.parents()[0] or nullid
564 564 p2 = p2 or self.dirstate.parents()[1] or nullid
565 565 c1 = self.changelog.read(p1)
566 566 c2 = self.changelog.read(p2)
567 567 m1 = self.manifest.read(c1[0])
568 568 mf1 = self.manifest.readflags(c1[0])
569 569 m2 = self.manifest.read(c2[0])
570 570
571 571 if orig_parent == p1:
572 572 update_dirstate = 1
573 573 else:
574 574 update_dirstate = 0
575 575
576 576 tr = self.transaction()
577 577 mm = m1.copy()
578 578 mfm = mf1.copy()
579 579 linkrev = self.changelog.count()
580 580 for f in files:
581 581 try:
582 582 t = self.wfile(f).read()
583 583 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
584 584 r = self.file(f)
585 585 mfm[f] = tm
586 586 mm[f] = r.add(t, {}, tr, linkrev,
587 587 m1.get(f, nullid), m2.get(f, nullid))
588 588 if update_dirstate:
589 589 self.dirstate.update([f], "n")
590 590 except IOError:
591 591 try:
592 592 del mm[f]
593 593 del mfm[f]
594 594 if update_dirstate:
595 595 self.dirstate.forget([f])
596 596 except:
597 597 # deleted from p2?
598 598 pass
599 599
600 600 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
601 601 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
602 602 tr.close()
603 603 if update_dirstate:
604 604 self.dirstate.setparents(n, nullid)
605 605
606 606 def commit(self, files = None, text = "", user = None, date = None):
607 607 commit = []
608 608 remove = []
609 609 if files:
610 610 for f in files:
611 611 s = self.dirstate.state(f)
612 612 if s in 'nmai':
613 613 commit.append(f)
614 614 elif s == 'r':
615 615 remove.append(f)
616 616 else:
617 617 self.ui.warn("%s not tracked!\n" % f)
618 618 else:
619 619 (c, a, d, u) = self.changes(None, None)
620 620 commit = c + a
621 621 remove = d
622 622
623 623 if not commit and not remove:
624 624 self.ui.status("nothing changed\n")
625 625 return
626 626
627 627 if not self.hook("precommit"):
628 628 return 1
629 629
630 630 p1, p2 = self.dirstate.parents()
631 631 c1 = self.changelog.read(p1)
632 632 c2 = self.changelog.read(p2)
633 633 m1 = self.manifest.read(c1[0])
634 634 mf1 = self.manifest.readflags(c1[0])
635 635 m2 = self.manifest.read(c2[0])
636 636 lock = self.lock()
637 637 tr = self.transaction()
638 638
639 639 # check in files
640 640 new = {}
641 641 linkrev = self.changelog.count()
642 642 commit.sort()
643 643 for f in commit:
644 644 self.ui.note(f + "\n")
645 645 try:
646 646 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
647 647 t = self.wfile(f).read()
648 648 except IOError:
649 649 self.ui.warn("trouble committing %s!\n" % f)
650 650 raise
651 651
652 652 meta = {}
653 653 cp = self.dirstate.copied(f)
654 654 if cp:
655 655 meta["copy"] = cp
656 656 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
657 657 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
658 658
659 659 r = self.file(f)
660 660 fp1 = m1.get(f, nullid)
661 661 fp2 = m2.get(f, nullid)
662 662 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
663 663
664 664 # update manifest
665 665 m1.update(new)
666 666 for f in remove:
667 667 if f in m1:
668 668 del m1[f]
669 669 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0])
670 670
671 671 # add changeset
672 672 new = new.keys()
673 673 new.sort()
674 674
675 675 if not text:
676 676 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
677 677 edittext += "".join(["HG: changed %s\n" % f for f in new])
678 678 edittext += "".join(["HG: removed %s\n" % f for f in remove])
679 679 edittext = self.ui.edit(edittext)
680 680 if not edittext.rstrip():
681 681 return 1
682 682 text = edittext
683 683
684 684 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
685 685
686 686 if not self.hook("commit", node=hex(n)):
687 687 return 1
688 688
689 689 tr.close()
690 690
691 691 self.dirstate.setparents(n)
692 692 self.dirstate.update(new, "n")
693 693 self.dirstate.forget(remove)
694 694
695 695 def changes(self, node1, node2, files=None):
696 696 # changed, added, deleted, unknown
697 697 c, a, d, u, mf1 = [], [], [], [], None
698 698
699 699 def fcmp(fn, mf):
700 700 t1 = self.wfile(fn).read()
701 701 t2 = self.file(fn).revision(mf[fn])
702 702 return cmp(t1, t2)
703 703
704 704 # are we comparing the working directory?
705 705 if not node1:
706 706 l, c, a, d, u = self.dirstate.changes(files, self.ignore)
707 707
708 708 # are we comparing working dir against its parent?
709 709 if not node2:
710 710 if l:
711 711 # do a full compare of any files that might have changed
712 712 change = self.changelog.read(self.dirstate.parents()[0])
713 713 mf1 = self.manifest.read(change[0])
714 for f in lookup:
715 if fcmp(f, mf):
714 for f in l:
715 if fcmp(f, mf1):
716 716 c.append(f)
717 717 return (c, a, d, u)
718 718
719 719 # are we comparing working dir against non-tip?
720 720 # generate a pseudo-manifest for the working dir
721 721 if not node1:
722 722 if not mf1:
723 723 change = self.changelog.read(self.dirstate.parents()[0])
724 724 mf1 = self.manifest.read(change[0])
725 725 for f in a + c + l:
726 726 mf1[f] = ""
727 727 for f in d:
728 728 if f in mf1: del mf1[f]
729 729 else:
730 730 change = self.changelog.read(node1)
731 731 mf1 = self.manifest.read(change[0])
732 732
733 733 change = self.changelog.read(node2)
734 734 mf2 = self.manifest.read(change[0])
735 735
736 736 for fn in mf2:
737 737 if mf1.has_key(fn):
738 738 if mf1[fn] != mf2[fn]:
739 739 if mf1[fn] != "" or fcmp(fn, mf2):
740 740 c.append(fn)
741 741 del mf1[fn]
742 742 else:
743 743 a.append(fn)
744 744
745 745 d = mf1.keys()
746 746 d.sort()
747 747
748 748 return (c, a, d, u)
749 749
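changes() is the method this commit repairs: when the working directory is compared against its own parent, the files whose stat information was inconclusive come back in l (the lookup list), and each one must be content-compared (fcmp) against the parent manifest before it is promoted to changed; the removed lines referenced the undefined names lookup and mf. The promotion step in isolation (hypothetical function and callbacks, for illustration only):

    def resolve_lookup(lookup, changed, read_working, read_stored):
        # Files whose stat was ambiguous are promoted to "changed" only if their
        # working-copy contents really differ from the stored revision.
        for fn in lookup:
            if read_working(fn) != read_stored(fn):
                changed.append(fn)
        return changed

    working = {"a.txt": b"new", "b.txt": b"same"}
    stored = {"a.txt": b"old", "b.txt": b"same"}
    assert resolve_lookup(["a.txt", "b.txt"], [],
                          working.__getitem__, stored.__getitem__) == ["a.txt"]
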
750 750 def add(self, list):
751 751 for f in list:
752 752 p = self.wjoin(f)
753 753 if not os.path.isfile(p):
754 754 self.ui.warn("%s does not exist!\n" % f)
755 755 elif self.dirstate.state(f) == 'n':
756 756 self.ui.warn("%s already tracked!\n" % f)
757 757 else:
758 758 self.dirstate.update([f], "a")
759 759
760 760 def forget(self, list):
761 761 for f in list:
762 762 if self.dirstate.state(f) not in 'ai':
763 763 self.ui.warn("%s not added!\n" % f)
764 764 else:
765 765 self.dirstate.forget([f])
766 766
767 767 def remove(self, list):
768 768 for f in list:
769 769 p = self.wjoin(f)
770 770 if os.path.isfile(p):
771 771 self.ui.warn("%s still exists!\n" % f)
772 772 elif self.dirstate.state(f) == 'a':
773 773 self.ui.warn("%s never committed!\n" % f)
774 774 self.dirstate.forget([f])
775 775 elif f not in self.dirstate:
776 776 self.ui.warn("%s not tracked!\n" % f)
777 777 else:
778 778 self.dirstate.update([f], "r")
779 779
780 780 def copy(self, source, dest):
781 781 p = self.wjoin(dest)
782 782 if not os.path.isfile(p):
783 783 self.ui.warn("%s does not exist!\n" % dest)
784 784 else:
785 785 if self.dirstate.state(dest) == '?':
786 786 self.dirstate.update([dest], "a")
787 787 self.dirstate.copy(source, dest)
788 788
789 789 def heads(self):
790 790 return self.changelog.heads()
791 791
792 792 def branches(self, nodes):
793 793 if not nodes: nodes = [self.changelog.tip()]
794 794 b = []
795 795 for n in nodes:
796 796 t = n
797 797 while n:
798 798 p = self.changelog.parents(n)
799 799 if p[1] != nullid or p[0] == nullid:
800 800 b.append((t, n, p[0], p[1]))
801 801 break
802 802 n = p[0]
803 803 return b
804 804
805 805 def between(self, pairs):
806 806 r = []
807 807
808 808 for top, bottom in pairs:
809 809 n, l, i = top, [], 0
810 810 f = 1
811 811
812 812 while n != bottom:
813 813 p = self.changelog.parents(n)[0]
814 814 if i == f:
815 815 l.append(n)
816 816 f = f * 2
817 817 n = p
818 818 i += 1
819 819
820 820 r.append(l)
821 821
822 822 return r
823 823
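between() walks first parents from each top node toward its bottom and records the nodes sitting 1, 2, 4, 8, ... steps below the top, which is what lets findincoming() narrow a branch range in a logarithmic number of round trips. The same exponential sampling over a toy linear history, sketched with a hypothetical helper:

    def sample_between(top, bottom, parent):
        # Keep the nodes that sit 1, 2, 4, 8, ... first-parent steps below top,
        # mirroring the i/f bookkeeping in between().
        samples, n, step, nextmark = [], top, 0, 1
        while n != bottom:
            if step == nextmark:
                samples.append(n)
                nextmark *= 2
            n = parent(n)
            step += 1
        return samples

    # a toy linear history 0 <- 1 <- ... <- 20, where parent(r) is r - 1
    assert sample_between(20, 0, lambda r: r - 1) == [19, 18, 16, 12, 4]
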
824 824 def newer(self, nodes):
825 825 m = {}
826 826 nl = []
827 827 pm = {}
828 828 cl = self.changelog
829 829 t = l = cl.count()
830 830
831 831 # find the lowest numbered node
832 832 for n in nodes:
833 833 l = min(l, cl.rev(n))
834 834 m[n] = 1
835 835
836 836 for i in xrange(l, t):
837 837 n = cl.node(i)
838 838 if n in m: # explicitly listed
839 839 pm[n] = 1
840 840 nl.append(n)
841 841 continue
842 842 for p in cl.parents(n):
843 843 if p in pm: # parent listed
844 844 pm[n] = 1
845 845 nl.append(n)
846 846 break
847 847
848 848 return nl
849 849
850 850 def findincoming(self, remote):
851 851 m = self.changelog.nodemap
852 852 search = []
853 853 fetch = []
854 854 seen = {}
855 855 seenbranch = {}
856 856
857 857 # if we have an empty repo, fetch everything
858 858 if self.changelog.tip() == nullid:
859 859 self.ui.status("requesting all changes\n")
860 860 return [nullid]
861 861
862 862 # otherwise, assume we're closer to the tip than the root
863 863 self.ui.status("searching for changes\n")
864 864 heads = remote.heads()
865 865 unknown = []
866 866 for h in heads:
867 867 if h not in m:
868 868 unknown.append(h)
869 869
870 870 if not unknown:
871 871 return None
872 872
873 873 rep = {}
874 874 reqcnt = 0
875 875
876 876 unknown = remote.branches(unknown)
877 877 while unknown:
878 878 r = []
879 879 while unknown:
880 880 n = unknown.pop(0)
881 881 if n[0] in seen:
882 882 continue
883 883
884 884 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
885 885 if n[0] == nullid:
886 886 break
887 887 if n in seenbranch:
888 888 self.ui.debug("branch already found\n")
889 889 continue
890 890 if n[1] and n[1] in m: # do we know the base?
891 891 self.ui.debug("found incomplete branch %s:%s\n"
892 892 % (short(n[0]), short(n[1])))
893 893 search.append(n) # schedule branch range for scanning
894 894 seenbranch[n] = 1
895 895 else:
896 896 if n[1] not in seen and n[1] not in fetch:
897 897 if n[2] in m and n[3] in m:
898 898 self.ui.debug("found new changeset %s\n" %
899 899 short(n[1]))
900 900 fetch.append(n[1]) # earliest unknown
901 901 continue
902 902
903 903 for a in n[2:4]:
904 904 if a not in rep:
905 905 r.append(a)
906 906 rep[a] = 1
907 907
908 908 seen[n[0]] = 1
909 909
910 910 if r:
911 911 reqcnt += 1
912 912 self.ui.debug("request %d: %s\n" %
913 913 (reqcnt, " ".join(map(short, r))))
914 914 for p in range(0, len(r), 10):
915 915 for b in remote.branches(r[p:p+10]):
916 916 self.ui.debug("received %s:%s\n" %
917 917 (short(b[0]), short(b[1])))
918 918 if b[0] not in m and b[0] not in seen:
919 919 unknown.append(b)
920 920
921 921 while search:
922 922 n = search.pop(0)
923 923 reqcnt += 1
924 924 l = remote.between([(n[0], n[1])])[0]
925 925 l.append(n[1])
926 926 p = n[0]
927 927 f = 1
928 928 for i in l:
929 929 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
930 930 if i in m:
931 931 if f <= 2:
932 932 self.ui.debug("found new branch changeset %s\n" %
933 933 short(p))
934 934 fetch.append(p)
935 935 else:
936 936 self.ui.debug("narrowed branch search to %s:%s\n"
937 937 % (short(p), short(i)))
938 938 search.append((p, i))
939 939 break
940 940 p, f = i, f * 2
941 941
942 942 for f in fetch:
943 943 if f in m:
944 944 raise RepoError("already have changeset " + short(f[:4]))
945 945
946 946 if fetch == [nullid]:
947 947 self.ui.warn("warning: pulling from an unrelated repository!\n")
948 948
949 949 self.ui.note("adding new changesets starting at " +
950 950 " ".join([short(f) for f in fetch]) + "\n")
951 951
952 952 self.ui.debug("%d total queries\n" % reqcnt)
953 953
954 954 return fetch
955 955
956 956 def changegroup(self, basenodes):
957 957 nodes = self.newer(basenodes)
958 958
959 959 # construct the link map
960 960 linkmap = {}
961 961 for n in nodes:
962 962 linkmap[self.changelog.rev(n)] = n
963 963
964 964 # construct a list of all changed files
965 965 changed = {}
966 966 for n in nodes:
967 967 c = self.changelog.read(n)
968 968 for f in c[3]:
969 969 changed[f] = 1
970 970 changed = changed.keys()
971 971 changed.sort()
972 972
973 973 # the changegroup is changesets + manifests + all file revs
974 974 revs = [ self.changelog.rev(n) for n in nodes ]
975 975
976 976 for y in self.changelog.group(linkmap): yield y
977 977 for y in self.manifest.group(linkmap): yield y
978 978 for f in changed:
979 979 yield struct.pack(">l", len(f) + 4) + f
980 980 g = self.file(f).group(linkmap)
981 981 for y in g:
982 982 yield y
983 983
984 984 def addchangegroup(self, generator):
985 985
986 986 class genread:
987 987 def __init__(self, generator):
988 988 self.g = generator
989 989 self.buf = ""
990 990 def read(self, l):
991 991 while l > len(self.buf):
992 992 try:
993 993 self.buf += self.g.next()
994 994 except StopIteration:
995 995 break
996 996 d, self.buf = self.buf[:l], self.buf[l:]
997 997 return d
998 998
999 999 def getchunk():
1000 1000 d = source.read(4)
1001 1001 if not d: return ""
1002 1002 l = struct.unpack(">l", d)[0]
1003 1003 if l <= 4: return ""
1004 1004 return source.read(l - 4)
1005 1005
1006 1006 def getgroup():
1007 1007 while 1:
1008 1008 c = getchunk()
1009 1009 if not c: break
1010 1010 yield c
1011 1011
1012 1012 def csmap(x):
1013 1013 self.ui.debug("add changeset %s\n" % short(x))
1014 1014 return self.changelog.count()
1015 1015
1016 1016 def revmap(x):
1017 1017 return self.changelog.rev(x)
1018 1018
1019 1019 if not generator: return
1020 1020 changesets = files = revisions = 0
1021 1021
1022 1022 source = genread(generator)
1023 1023 lock = self.lock()
1024 1024 tr = self.transaction()
1025 1025
1026 1026 # pull off the changeset group
1027 1027 self.ui.status("adding changesets\n")
1028 1028 co = self.changelog.tip()
1029 1029 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1030 1030 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1031 1031
1032 1032 # pull off the manifest group
1033 1033 self.ui.status("adding manifests\n")
1034 1034 mm = self.manifest.tip()
1035 1035 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1036 1036
1037 1037 # process the files
1038 1038 self.ui.status("adding file revisions\n")
1039 1039 while 1:
1040 1040 f = getchunk()
1041 1041 if not f: break
1042 1042 self.ui.debug("adding %s revisions\n" % f)
1043 1043 fl = self.file(f)
1044 1044 o = fl.count()
1045 1045 n = fl.addgroup(getgroup(), revmap, tr)
1046 1046 revisions += fl.count() - o
1047 1047 files += 1
1048 1048
1049 1049 self.ui.status(("modified %d files, added %d changesets" +
1050 1050 " and %d new revisions\n")
1051 1051 % (files, changesets, revisions))
1052 1052
1053 1053 tr.close()
1054 1054 return
1055 1055
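On the wire a changegroup is a stream of length-prefixed chunks: each chunk starts with a 4-byte big-endian length that counts the length field itself, and a chunk whose length is 4 or less closes the current group; changegroup() streams the changelog group, the manifest group, and then a (filename chunk, file group) pair per changed file, which addchangegroup() unpacks with getchunk()/getgroup(). A self-contained sketch of just the framing (hypothetical helper names):

    import struct
    from io import BytesIO

    def writechunk(out, payload):
        # A chunk is a 4-byte big-endian length (which counts itself) + payload.
        out.write(struct.pack(">l", len(payload) + 4) + payload)

    def closegroup(out):
        # A length of 0 (anything <= 4) terminates the group, as in getchunk().
        out.write(struct.pack(">l", 0))

    def readchunks(stream):
        while True:
            header = stream.read(4)
            if not header:
                return
            length = struct.unpack(">l", header)[0]
            if length <= 4:
                return
            yield stream.read(length - 4)

    buf = BytesIO()
    for part in (b"delta one", b"delta two"):
        writechunk(buf, part)
    closegroup(buf)
    buf.seek(0)
    assert list(readchunks(buf)) == [b"delta one", b"delta two"]
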
1056 1056 def update(self, node, allow=False, force=False):
1057 1057 pl = self.dirstate.parents()
1058 1058 if not force and pl[1] != nullid:
1059 1059 self.ui.warn("aborting: outstanding uncommitted merges\n")
1060 1060 return
1061 1061
1062 1062 p1, p2 = pl[0], node
1063 1063 pa = self.changelog.ancestor(p1, p2)
1064 1064 m1n = self.changelog.read(p1)[0]
1065 1065 m2n = self.changelog.read(p2)[0]
1066 1066 man = self.manifest.ancestor(m1n, m2n)
1067 1067 m1 = self.manifest.read(m1n)
1068 1068 mf1 = self.manifest.readflags(m1n)
1069 1069 m2 = self.manifest.read(m2n)
1070 1070 mf2 = self.manifest.readflags(m2n)
1071 1071 ma = self.manifest.read(man)
1072 1072 mfa = self.manifest.readflags(man)
1073 1073
1074 1074 (c, a, d, u) = self.changes(None, None)
1075 1075
1076 1076 # is this a jump, or a merge? i.e. is there a linear path
1077 1077 # from p1 to p2?
1078 1078 linear_path = (pa == p1 or pa == p2)
1079 1079
1080 1080 # resolve the manifest to determine which files
1081 1081 # we care about merging
1082 1082 self.ui.note("resolving manifests\n")
1083 1083 self.ui.debug(" ancestor %s local %s remote %s\n" %
1084 1084 (short(man), short(m1n), short(m2n)))
1085 1085
1086 1086 merge = {}
1087 1087 get = {}
1088 1088 remove = []
1089 1089 mark = {}
1090 1090
1091 1091 # construct a working dir manifest
1092 1092 mw = m1.copy()
1093 1093 mfw = mf1.copy()
1094 1094 for f in a + c + u:
1095 1095 mw[f] = ""
1096 1096 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1097 1097 for f in d:
1098 1098 if f in mw: del mw[f]
1099 1099
1100 1100 # If we're jumping between revisions (as opposed to merging),
1101 1101 # and if neither the working directory nor the target rev has
1102 1102 # the file, then we need to remove it from the dirstate, to
1103 1103 # prevent the dirstate from listing the file when it is no
1104 1104 # longer in the manifest.
1105 1105 if linear_path and f not in m2:
1106 1106 self.dirstate.forget((f,))
1107 1107
1108 1108 for f, n in mw.iteritems():
1109 1109 if f in m2:
1110 1110 s = 0
1111 1111
1112 1112 # is the wfile new since m1, and match m2?
1113 1113 if f not in m1:
1114 1114 t1 = self.wfile(f).read()
1115 1115 t2 = self.file(f).revision(m2[f])
1116 1116 if cmp(t1, t2) == 0:
1117 1117 mark[f] = 1
1118 1118 n = m2[f]
1119 1119 del t1, t2
1120 1120
1121 1121 # are files different?
1122 1122 if n != m2[f]:
1123 1123 a = ma.get(f, nullid)
1124 1124 # are both different from the ancestor?
1125 1125 if n != a and m2[f] != a:
1126 1126 self.ui.debug(" %s versions differ, resolve\n" % f)
1127 1127 # merge executable bits
1128 1128 # "if we changed or they changed, change in merge"
1129 1129 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1130 1130 mode = ((a^b) | (a^c)) ^ a
1131 1131 merge[f] = (m1.get(f, nullid), m2[f], mode)
1132 1132 s = 1
1133 1133 # are we clobbering?
1134 1134 # is remote's version newer?
1135 1135 # or are we going back in time?
1136 1136 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1137 1137 self.ui.debug(" remote %s is newer, get\n" % f)
1138 1138 get[f] = m2[f]
1139 1139 s = 1
1140 1140 else:
1141 1141 mark[f] = 1
1142 1142
1143 1143 if not s and mfw[f] != mf2[f]:
1144 1144 if force:
1145 1145 self.ui.debug(" updating permissions for %s\n" % f)
1146 1146 util.set_exec(self.wjoin(f), mf2[f])
1147 1147 else:
1148 1148 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1149 1149 mode = ((a^b) | (a^c)) ^ a
1150 1150 if mode != b:
1151 1151 self.ui.debug(" updating permissions for %s\n" % f)
1152 1152 util.set_exec(self.wjoin(f), mode)
1153 1153 mark[f] = 1
1154 1154 del m2[f]
1155 1155 elif f in ma:
1156 1156 if not force and n != ma[f]:
1157 1157 r = ""
1158 1158 if linear_path or allow:
1159 1159 r = self.ui.prompt(
1160 1160 (" local changed %s which remote deleted\n" % f) +
1161 1161 "(k)eep or (d)elete?", "[kd]", "k")
1162 1162 if r == "d":
1163 1163 remove.append(f)
1164 1164 else:
1165 1165 self.ui.debug("other deleted %s\n" % f)
1166 1166 remove.append(f) # other deleted it
1167 1167 else:
1168 1168 if n == m1.get(f, nullid): # same as parent
1169 1169 if p2 == pa: # going backwards?
1170 1170 self.ui.debug("remote deleted %s\n" % f)
1171 1171 remove.append(f)
1172 1172 else:
1173 1173 self.ui.debug("local created %s, keeping\n" % f)
1174 1174 else:
1175 1175 self.ui.debug("working dir created %s, keeping\n" % f)
1176 1176
1177 1177 for f, n in m2.iteritems():
1178 1178 if f[0] == "/": continue
1179 1179 if not force and f in ma and n != ma[f]:
1180 1180 r = ""
1181 1181 if linear_path or allow:
1182 1182 r = self.ui.prompt(
1183 1183 ("remote changed %s which local deleted\n" % f) +
1184 1184 "(k)eep or (d)elete?", "[kd]", "k")
1185 1185 if r == "d": remove.append(f)
1186 1186 else:
1187 1187 self.ui.debug("remote created %s\n" % f)
1188 1188 get[f] = n
1189 1189
1190 1190 del mw, m1, m2, ma
1191 1191
1192 1192 if force:
1193 1193 for f in merge:
1194 1194 get[f] = merge[f][1]
1195 1195 merge = {}
1196 1196
1197 1197 if linear_path:
1198 1198 # we don't need to do any magic, just jump to the new rev
1199 1199 mode = 'n'
1200 1200 p1, p2 = p2, nullid
1201 1201 else:
1202 1202 if not allow:
1203 1203 self.ui.status("this update spans a branch" +
1204 1204 " affecting the following files:\n")
1205 1205 fl = merge.keys() + get.keys()
1206 1206 fl.sort()
1207 1207 for f in fl:
1208 1208 cf = ""
1209 1209 if f in merge: cf = " (resolve)"
1210 1210 self.ui.status(" %s%s\n" % (f, cf))
1211 1211 self.ui.warn("aborting update spanning branches!\n")
1212 1212 self.ui.status("(use update -m to perform a branch merge)\n")
1213 1213 return 1
1214 1214 # we have to remember what files we needed to get/change
1215 1215 # because any file that's different from either one of its
1216 1216 # parents must be in the changeset
1217 1217 mode = 'm'
1218 1218 self.dirstate.update(mark.keys(), "m")
1219 1219
1220 1220 self.dirstate.setparents(p1, p2)
1221 1221
1222 1222 # get the files we don't need to change
1223 1223 files = get.keys()
1224 1224 files.sort()
1225 1225 for f in files:
1226 1226 if f[0] == "/": continue
1227 1227 self.ui.note("getting %s\n" % f)
1228 1228 t = self.file(f).read(get[f])
1229 1229 try:
1230 1230 self.wfile(f, "w").write(t)
1231 1231 except IOError:
1232 1232 os.makedirs(os.path.dirname(self.wjoin(f)))
1233 1233 self.wfile(f, "w").write(t)
1234 1234 util.set_exec(self.wjoin(f), mf2[f])
1235 1235 self.dirstate.update([f], mode)
1236 1236
1237 1237 # merge the tricky bits
1238 1238 files = merge.keys()
1239 1239 files.sort()
1240 1240 for f in files:
1241 1241 self.ui.status("merging %s\n" % f)
1242 1242 m, o, flag = merge[f]
1243 1243 self.merge3(f, m, o)
1244 1244 util.set_exec(self.wjoin(f), flag)
1245 1245 self.dirstate.update([f], 'm')
1246 1246
1247 1247 for f in remove:
1248 1248 self.ui.note("removing %s\n" % f)
1249 1249 os.unlink(f)
1250 1250 if mode == 'n':
1251 1251 self.dirstate.forget(remove)
1252 1252 else:
1253 1253 self.dirstate.update(remove, 'r')
1254 1254
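update() merges the executable bit with the expression ((a^b) | (a^c)) ^ a, where a is the ancestor's bit, b the working directory's and c the remote's: the result differs from the ancestor exactly when at least one side changed it ("if we changed or they changed, change in merge"). The same rule as a tiny function:

    def merge_exec_bit(ancestor, local, remote):
        # ((a ^ b) | (a ^ c)) ^ a: flip the ancestor's bit iff either side flipped it.
        a, b, c = int(ancestor), int(local), int(remote)
        return bool(((a ^ b) | (a ^ c)) ^ a)

    assert merge_exec_bit(0, 1, 0) is True    # we made it executable
    assert merge_exec_bit(1, 1, 0) is False   # they cleared it
    assert merge_exec_bit(0, 0, 0) is False   # nobody touched it
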
1255 1255 def merge3(self, fn, my, other):
1256 1256 """perform a 3-way merge in the working directory"""
1257 1257
1258 1258 def temp(prefix, node):
1259 1259 pre = "%s~%s." % (os.path.basename(fn), prefix)
1260 1260 (fd, name) = tempfile.mkstemp("", pre)
1261 1261 f = os.fdopen(fd, "wb")
1262 1262 f.write(fl.revision(node))
1263 1263 f.close()
1264 1264 return name
1265 1265
1266 1266 fl = self.file(fn)
1267 1267 base = fl.ancestor(my, other)
1268 1268 a = self.wjoin(fn)
1269 1269 b = temp("base", base)
1270 1270 c = temp("other", other)
1271 1271
1272 1272 self.ui.note("resolving %s\n" % fn)
1273 1273 self.ui.debug("file %s: other %s ancestor %s\n" %
1274 1274 (fn, short(other), short(base)))
1275 1275
1276 1276 cmd = os.environ.get("HGMERGE", "hgmerge")
1277 1277 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1278 1278 if r:
1279 1279 self.ui.warn("merging %s failed!\n" % fn)
1280 1280
1281 1281 os.unlink(b)
1282 1282 os.unlink(c)
1283 1283
1284 1284 def verify(self):
1285 1285 filelinkrevs = {}
1286 1286 filenodes = {}
1287 1287 changesets = revisions = files = 0
1288 1288 errors = 0
1289 1289
1290 1290 seen = {}
1291 1291 self.ui.status("checking changesets\n")
1292 1292 for i in range(self.changelog.count()):
1293 1293 changesets += 1
1294 1294 n = self.changelog.node(i)
1295 1295 if n in seen:
1296 1296 self.ui.warn("duplicate changeset at revision %d\n" % i)
1297 1297 errors += 1
1298 1298 seen[n] = 1
1299 1299
1300 1300 for p in self.changelog.parents(n):
1301 1301 if p not in self.changelog.nodemap:
1302 1302 self.ui.warn("changeset %s has unknown parent %s\n" %
1303 1303 (short(n), short(p)))
1304 1304 errors += 1
1305 1305 try:
1306 1306 changes = self.changelog.read(n)
1307 1307 except Exception, inst:
1308 1308 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1309 1309 errors += 1
1310 1310
1311 1311 for f in changes[3]:
1312 1312 filelinkrevs.setdefault(f, []).append(i)
1313 1313
1314 1314 seen = {}
1315 1315 self.ui.status("checking manifests\n")
1316 1316 for i in range(self.manifest.count()):
1317 1317 n = self.manifest.node(i)
1318 1318 if n in seen:
1319 1319 self.ui.warn("duplicate manifest at revision %d\n" % i)
1320 1320 errors += 1
1321 1321 seen[n] = 1
1322 1322
1323 1323 for p in self.manifest.parents(n):
1324 1324 if p not in self.manifest.nodemap:
1325 1325 self.ui.warn("manifest %s has unknown parent %s\n" %
1326 1326 (short(n), short(p)))
1327 1327 errors += 1
1328 1328
1329 1329 try:
1330 1330 delta = mdiff.patchtext(self.manifest.delta(n))
1331 1331 except KeyboardInterrupt:
1332 1332 print "aborted"
1333 1333 sys.exit(0)
1334 1334 except Exception, inst:
1335 1335 self.ui.warn("unpacking manifest %s: %s\n"
1336 1336 % (short(n), inst))
1337 1337 errors += 1
1338 1338
1339 1339 ff = [ l.split('\0') for l in delta.splitlines() ]
1340 1340 for f, fn in ff:
1341 1341 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1342 1342
1343 1343 self.ui.status("crosschecking files in changesets and manifests\n")
1344 1344 for f in filenodes:
1345 1345 if f not in filelinkrevs:
1346 1346 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1347 1347 errors += 1
1348 1348
1349 1349 for f in filelinkrevs:
1350 1350 if f not in filenodes:
1351 1351 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1352 1352 errors += 1
1353 1353
1354 1354 self.ui.status("checking files\n")
1355 1355 ff = filenodes.keys()
1356 1356 ff.sort()
1357 1357 for f in ff:
1358 1358 if f == "/dev/null": continue
1359 1359 files += 1
1360 1360 fl = self.file(f)
1361 1361 nodes = { nullid: 1 }
1362 1362 seen = {}
1363 1363 for i in range(fl.count()):
1364 1364 revisions += 1
1365 1365 n = fl.node(i)
1366 1366
1367 1367 if n in seen:
1368 1368 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1369 1369 errors += 1
1370 1370
1371 1371 if n not in filenodes[f]:
1372 1372 self.ui.warn("%s: %d:%s not in manifests\n"
1373 1373 % (f, i, short(n)))
1374 1374 print len(filenodes[f].keys()), fl.count(), f
1375 1375 errors += 1
1376 1376 else:
1377 1377 del filenodes[f][n]
1378 1378
1379 1379 flr = fl.linkrev(n)
1380 1380 if flr not in filelinkrevs[f]:
1381 1381 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1382 1382 % (f, short(n), fl.linkrev(n)))
1383 1383 errors += 1
1384 1384 else:
1385 1385 filelinkrevs[f].remove(flr)
1386 1386
1387 1387 # verify contents
1388 1388 try:
1389 1389 t = fl.read(n)
1390 1390 except Exception, inst:
1391 1391 self.ui.warn("unpacking file %s %s: %s\n"
1392 1392 % (f, short(n), inst))
1393 1393 errors += 1
1394 1394
1395 1395 # verify parents
1396 1396 (p1, p2) = fl.parents(n)
1397 1397 if p1 not in nodes:
1398 1398 self.ui.warn("file %s:%s unknown parent 1 %s" %
1399 1399 (f, short(n), short(p1)))
1400 1400 errors += 1
1401 1401 if p2 not in nodes:
1402 1402 self.ui.warn("file %s:%s unknown parent 2 %s" %
1403 1403 (f, short(n), short(p2)))
1404 1404 errors += 1
1405 1405 nodes[n] = 1
1406 1406
1407 1407 # cross-check
1408 1408 for node in filenodes[f]:
1409 1409 self.ui.warn("node %s in manifests not in %s\n"
1410 1410 % (hex(node), f))
1411 1411 errors += 1
1412 1412
1413 1413 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1414 1414 (files, changesets, revisions))
1415 1415
1416 1416 if errors:
1417 1417 self.ui.warn("%d integrity errors encountered!\n" % errors)
1418 1418 return 1
1419 1419
1420 1420 class remoterepository:
1421 1421 def __init__(self, ui, path):
1422 1422 self.url = path
1423 1423 self.ui = ui
1424 1424 no_list = [ "localhost", "127.0.0.1" ]
1425 1425 host = ui.config("http_proxy", "host")
1426 1426 if host is None:
1427 1427 host = os.environ.get("http_proxy")
1428 1428 if host and host.startswith('http://'):
1429 1429 host = host[7:]
1430 1430 user = ui.config("http_proxy", "user")
1431 1431 passwd = ui.config("http_proxy", "passwd")
1432 1432 no = ui.config("http_proxy", "no")
1433 1433 if no is None:
1434 1434 no = os.environ.get("no_proxy")
1435 1435 if no:
1436 1436 no_list = no_list + no.split(",")
1437 1437
1438 1438 no_proxy = 0
1439 1439 for h in no_list:
1440 1440 if (path.startswith("http://" + h + "/") or
1441 1441 path.startswith("http://" + h + ":") or
1442 1442 path == "http://" + h):
1443 1443 no_proxy = 1
1444 1444
1445 1445 # Note: urllib2 takes proxy values from the environment and those will
1446 1446 # take precedence
1447 1447 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1448 1448 if os.environ.has_key(env):
1449 1449 del os.environ[env]
1450 1450
1451 1451 proxy_handler = urllib2.BaseHandler()
1452 1452 if host and not no_proxy:
1453 1453 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1454 1454
1455 1455 authinfo = None
1456 1456 if user and passwd:
1457 1457 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1458 1458 passmgr.add_password(None, host, user, passwd)
1459 1459 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1460 1460
1461 1461 opener = urllib2.build_opener(proxy_handler, authinfo)
1462 1462 urllib2.install_opener(opener)
1463 1463
1464 1464 def do_cmd(self, cmd, **args):
1465 1465 self.ui.debug("sending %s command\n" % cmd)
1466 1466 q = {"cmd": cmd}
1467 1467 q.update(args)
1468 1468 qs = urllib.urlencode(q)
1469 1469 cu = "%s?%s" % (self.url, qs)
1470 1470 return urllib2.urlopen(cu)
1471 1471
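All remote operations go over the same minimal HTTP protocol: do_cmd() issues a GET against the repository URL with cmd=<command> and the remaining keyword arguments url-encoded into the query string, and heads/branches/between simply parse hex nodes out of the response body (changegroup additionally streams zlib-compressed data). Building such a request URL, sketched with a hypothetical host (urllib.parse replaces the Python 2 urllib used above):

    from urllib.parse import urlencode

    def command_url(base, cmd, **args):
        # The repo URL plus a query string with cmd=<name> and the command's
        # arguments, in the shape do_cmd() builds.
        query = {"cmd": cmd}
        query.update(args)
        return "%s?%s" % (base, urlencode(query))

    url = command_url("http://example.org/hg", "branches", nodes="00" * 20)
    assert url.startswith("http://example.org/hg?") and "cmd=branches" in url
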
1472 1472 def heads(self):
1473 1473 d = self.do_cmd("heads").read()
1474 1474 try:
1475 1475 return map(bin, d[:-1].split(" "))
1476 1476 except:
1477 1477 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1478 1478 raise
1479 1479
1480 1480 def branches(self, nodes):
1481 1481 n = " ".join(map(hex, nodes))
1482 1482 d = self.do_cmd("branches", nodes=n).read()
1483 1483 try:
1484 1484 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1485 1485 return br
1486 1486 except:
1487 1487 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1488 1488 raise
1489 1489
1490 1490 def between(self, pairs):
1491 1491 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1492 1492 d = self.do_cmd("between", pairs=n).read()
1493 1493 try:
1494 1494 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1495 1495 return p
1496 1496 except:
1497 1497 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1498 1498 raise
1499 1499
1500 1500 def changegroup(self, nodes):
1501 1501 n = " ".join(map(hex, nodes))
1502 1502 zd = zlib.decompressobj()
1503 1503 f = self.do_cmd("changegroup", roots=n)
1504 1504 bytes = 0
1505 1505 while 1:
1506 1506 d = f.read(4096)
1507 1507 bytes += len(d)
1508 1508 if not d:
1509 1509 yield zd.flush()
1510 1510 break
1511 1511 yield zd.decompress(d)
1512 1512 self.ui.note("%d bytes of data transferred\n" % bytes)
1513 1513
1514 1514 def repository(ui, path=None, create=0):
1515 1515 if path and path[:7] == "http://":
1516 1516 return remoterepository(ui, path)
1517 1517 if path and path[:5] == "hg://":
1518 1518 return remoterepository(ui, path.replace("hg://", "http://"))
1519 1519 if path and path[:11] == "old-http://":
1520 1520 return localrepository(ui, path.replace("old-http://", "http://"))
1521 1521 else:
1522 1522 return localrepository(ui, path, create)
1523 1523