# Scraped changeset header (code-browser residue, preserved for provenance):
#   changeset r1040:35e883d1 on branch "default", by mpm@selenic.com
#   "Show number of new heads when doing a pull"
#   diff hunk context: @@ -1,2287 +1,2294 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect errno select stat")
15 15
class filelog(revlog):
    """Revlog tracking the history of a single file.

    Revision data may carry an inline metadata block (e.g. copy
    information), framed by a pair of '\\1\\n' markers at the start
    of the text.
    """
    def __init__(self, opener, path):
        revlog.__init__(self, opener,
                        os.path.join("data", self.encodedir(path + ".i")),
                        os.path.join("data", self.encodedir(path + ".d")))

    # This avoids a collision between a file named foo and a dir named
    # foo.i or foo.d
    def encodedir(self, path):
        return (path
                .replace(".hg/", ".hg.hg/")
                .replace(".i/", ".i.hg/")
                .replace(".d/", ".d.hg/"))

    def decodedir(self, path):
        # exact inverse of encodedir
        return (path
                .replace(".d.hg/", ".d/")
                .replace(".i.hg/", ".i/")
                .replace(".hg.hg/", ".hg/"))

    def read(self, node):
        """Return the file contents for node, minus any metadata block."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.find('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        """Return the inline metadata for node as a dict.

        Returns an empty dict when the revision carries no metadata
        frame.  (Bug fix: 'm' was previously never initialized, so this
        raised NameError whenever metadata was present, and the no-frame
        case inconsistently returned the raw text instead of a dict.)
        """
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return {}
        s = t.find('\1\n', 2)
        mt = t[2:s]
        m = {}
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision, prepending a metadata frame when needed."""
        if meta or text.startswith('\1\n'):
            # text that itself starts with the marker must be framed too,
            # so read() can strip the frame back off unambiguously
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n" + "".join(mt) + "\1\n" + text
        return self.addrevision(text, transaction, link, p1, p2)

    def annotate(self, node):
        """Return [(linkrev, line)] attributing each line of node."""

        def decorate(text, rev):
            # tag every line of text with revision rev
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # copy annotations for unchanged blocks from parent to child
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {node:1}
        visit = [node]
        while visit:
            n = visit.pop(0)
            for p in self.parents(n):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision which is a topological order
        visit = [ (self.rev(n), n) for n in needed.keys() ]
        visit.sort()
        hist = {}

        for r,n in visit:
            curr = decorate(self.read(n), self.linkrev(n))
            for p in self.parents(n):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[n] = curr

        # n is the queried node here: it has the highest revision number
        # of the set, so it is visited last
        return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
class manifest(revlog):
    # Revlog of manifests: one line per tracked file, formatted as
    # "<path>\0<40 hex chars of node><'x' if executable>\n", sorted by path.
    def __init__(self, opener):
        self.mapcache = None   # (node, {file: node}, {file: is_exec}) of last read
        self.listcache = None  # (text, text.splitlines(1)) of last read/add
        self.addlist = None    # line list being assembled by add()
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return {file: filenode} for the manifest at node (cached)."""
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            # first 40 chars after \0 are the hex node; a trailing "x"
            # (before the newline) marks the file executable
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        """Return {file: is_executable} for the manifest at node."""
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            self.read(node)
        return self.mapcache[2]

    def diff(self, a, b):
        # this is sneaky, as we're not actually using a and b
        if self.listcache and self.addlist and self.listcache[0] == a:
            d = mdiff.diff(self.listcache[1], self.addlist, 1)
            if mdiff.patch(a, d) != b:
                # cached delta failed to reproduce b: distrust the cache
                sys.stderr.write("*** sortdiff failed, falling back ***\n")
                return mdiff.textdiff(a, b)
            return d
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, flags, transaction, link, p1=None, p2=None,
            changed=None):
        """Store a new manifest revision and return its node.

        map is {file: filenode}, flags is {file: is_exec}.  When
        `changed` ((added/changed files, removed files)) is given and
        the list cache is parented by p1, the new text is produced
        incrementally from the cached line list instead of being
        rebuilt from scratch.
        """
        # directly generate the mdiff delta from the data collected during
        # the bisect loop below
        def gendelta(delta):
            i = 0
            result = []
            while i < len(delta):
                start = delta[i][2]
                end = delta[i][3]
                l = delta[i][4]
                if l == None:
                    l = ""
                # coalesce adjacent/overlapping hunks into a single one
                while i < len(delta) - 1 and start <= delta[i+1][2] \
                          and end >= delta[i+1][2]:
                    if delta[i+1][3] > end:
                        end = delta[i+1][3]
                    if delta[i+1][4]:
                        l += delta[i+1][4]
                    i += 1
                result.append(struct.pack(">lll", start, end, len(l)) + l)
                i += 1
            return result

        # apply the changes collected during the bisect loop to our addlist
        def addlistdelta(addlist, delta):
            # apply the deltas to the addlist. start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(delta)
            while i > 0:
                i -= 1
                start = delta[i][0]
                end = delta[i][1]
                if delta[i][4]:
                    addlist[start:end] = [delta[i][4]]
                else:
                    del addlist[start:end]
            return addlist

        # calculate the byte offset of the start of each line in the
        # manifest
        def calcoffsets(addlist):
            offsets = [0] * (len(addlist) + 1)
            offset = 0
            i = 0
            while i < len(addlist):
                offsets[i] = offset
                offset += len(addlist[i])
                i += 1
            offsets[i] = offset
            return offsets

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
               self.mapcache[0] != p1:
            files = map.keys()
            files.sort()

            # full rebuild: emit every line sorted by path
            self.addlist = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                            for f in files]
            cachedelta = None
        else:
            addlist = self.listcache[1]

            # find the starting offset for each line in the add list
            offsets = calcoffsets(addlist)

            # combine the changed lists into one list for sorting
            # (tag 0 = added/changed, tag 1 = removed)
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            bs = 0

            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                bs = bisect.bisect(addlist, f, bs)
                if bs < len(addlist):
                    fn = addlist[bs][:addlist[bs].index('\0')]
                else:
                    fn = None
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]),
                                          flags[f] and "x" or '')
                else:
                    l = None
                start = bs
                if fn != f:
                    # item not found, insert a new one
                    end = bs
                    if w[1] == 1:
                        # asked to remove a file that isn't in the manifest
                        sys.stderr.write("failed to remove %s from manifest\n"
                                         % f)
                        sys.exit(1)
                else:
                    # item is found, replace/delete the existing line
                    end = bs + 1
                delta.append([start, end, offsets[start], offsets[end], l])

            self.addlist = addlistdelta(addlist, delta)
            # the cached delta is only reusable if it is against the tip
            if self.mapcache[0] == self.tip():
                cachedelta = "".join(gendelta(delta))
            else:
                cachedelta = None

        text = "".join(self.addlist)
        # sanity check: the delta must reproduce the full text exactly
        if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
            sys.stderr.write("manifest delta failure\n")
            sys.exit(1)
        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self.mapcache = (n, map, flags)
        self.listcache = (text, self.addlist)
        self.addlist = None

        return n
261 261
class changelog(revlog):
    """Revlog of changesets.

    Each entry is: manifest hex node, user, date, changed files
    (one per line), a blank line, then the description.
    """
    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Parse entry text into (manifest, user, date, files, desc)."""
        if not text:
            return (nullid, "", "0", [], "")
        split = text.index("\n\n")
        description = text[split + 2:]
        header = text[:split].splitlines()
        mnode = bin(header[0])
        who = header[1]
        when = header[2]
        if " " not in when:
            when += " 0" # some tools used -d without a timezone
        return (mnode, who, when, header[3:], description)

    def read(self, node):
        """Return the parsed changeset stored at node."""
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        """Add a changeset revision; defaults date to now (local tz)."""
        if not date:
            if time.daylight:
                offset = time.altzone
            else:
                offset = time.timezone
            date = "%d %d" % (time.time(), offset)
        list.sort()
        entry = [hex(manifest), user, date] + list + ["", desc]
        return self.addrevision("\n".join(entry), transaction,
                                self.count(), p1, p2)
293 293
294 294 class dirstate:
295 295 def __init__(self, opener, ui, root):
296 296 self.opener = opener
297 297 self.root = root
298 298 self.dirty = 0
299 299 self.ui = ui
300 300 self.map = None
301 301 self.pl = None
302 302 self.copies = {}
303 303 self.ignorefunc = None
304 304
305 305 def wjoin(self, f):
306 306 return os.path.join(self.root, f)
307 307
308 308 def getcwd(self):
309 309 cwd = os.getcwd()
310 310 if cwd == self.root: return ''
311 311 return cwd[len(self.root) + 1:]
312 312
313 313 def ignore(self, f):
314 314 if not self.ignorefunc:
315 315 bigpat = []
316 316 try:
317 317 l = file(self.wjoin(".hgignore"))
318 318 for pat in l:
319 319 p = pat.rstrip()
320 320 if p:
321 321 try:
322 322 re.compile(p)
323 323 except:
324 324 self.ui.warn("ignoring invalid ignore"
325 325 + " regular expression '%s'\n" % p)
326 326 else:
327 327 bigpat.append(p)
328 328 except IOError: pass
329 329
330 330 if bigpat:
331 331 s = "(?:%s)" % (")|(?:".join(bigpat))
332 332 r = re.compile(s)
333 333 self.ignorefunc = r.search
334 334 else:
335 335 self.ignorefunc = util.never
336 336
337 337 return self.ignorefunc(f)
338 338
339 339 def __del__(self):
340 340 if self.dirty:
341 341 self.write()
342 342
343 343 def __getitem__(self, key):
344 344 try:
345 345 return self.map[key]
346 346 except TypeError:
347 347 self.read()
348 348 return self[key]
349 349
350 350 def __contains__(self, key):
351 351 if not self.map: self.read()
352 352 return key in self.map
353 353
354 354 def parents(self):
355 355 if not self.pl:
356 356 self.read()
357 357 return self.pl
358 358
359 359 def markdirty(self):
360 360 if not self.dirty:
361 361 self.dirty = 1
362 362
363 363 def setparents(self, p1, p2 = nullid):
364 364 self.markdirty()
365 365 self.pl = p1, p2
366 366
367 367 def state(self, key):
368 368 try:
369 369 return self[key][0]
370 370 except KeyError:
371 371 return "?"
372 372
373 373 def read(self):
374 374 if self.map is not None: return self.map
375 375
376 376 self.map = {}
377 377 self.pl = [nullid, nullid]
378 378 try:
379 379 st = self.opener("dirstate").read()
380 380 if not st: return
381 381 except: return
382 382
383 383 self.pl = [st[:20], st[20: 40]]
384 384
385 385 pos = 40
386 386 while pos < len(st):
387 387 e = struct.unpack(">cllll", st[pos:pos+17])
388 388 l = e[4]
389 389 pos += 17
390 390 f = st[pos:pos + l]
391 391 if '\0' in f:
392 392 f, c = f.split('\0')
393 393 self.copies[f] = c
394 394 self.map[f] = e[:4]
395 395 pos += l
396 396
397 397 def copy(self, source, dest):
398 398 self.read()
399 399 self.markdirty()
400 400 self.copies[dest] = source
401 401
402 402 def copied(self, file):
403 403 return self.copies.get(file, None)
404 404
405 405 def update(self, files, state, **kw):
406 406 ''' current states:
407 407 n normal
408 408 m needs merging
409 409 r marked for removal
410 410 a marked for addition'''
411 411
412 412 if not files: return
413 413 self.read()
414 414 self.markdirty()
415 415 for f in files:
416 416 if state == "r":
417 417 self.map[f] = ('r', 0, 0, 0)
418 418 else:
419 419 s = os.stat(os.path.join(self.root, f))
420 420 st_size = kw.get('st_size', s.st_size)
421 421 st_mtime = kw.get('st_mtime', s.st_mtime)
422 422 self.map[f] = (state, s.st_mode, st_size, st_mtime)
423 423
424 424 def forget(self, files):
425 425 if not files: return
426 426 self.read()
427 427 self.markdirty()
428 428 for f in files:
429 429 try:
430 430 del self.map[f]
431 431 except KeyError:
432 432 self.ui.warn("not in dirstate: %s!\n" % f)
433 433 pass
434 434
435 435 def clear(self):
436 436 self.map = {}
437 437 self.markdirty()
438 438
439 439 def write(self):
440 440 st = self.opener("dirstate", "w")
441 441 st.write("".join(self.pl))
442 442 for f, e in self.map.items():
443 443 c = self.copied(f)
444 444 if c:
445 445 f = f + "\0" + c
446 446 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
447 447 st.write(e + f)
448 448 self.dirty = 0
449 449
450 450 def filterfiles(self, files):
451 451 ret = {}
452 452 unknown = []
453 453
454 454 for x in files:
455 455 if x is '.':
456 456 return self.map.copy()
457 457 if x not in self.map:
458 458 unknown.append(x)
459 459 else:
460 460 ret[x] = self.map[x]
461 461
462 462 if not unknown:
463 463 return ret
464 464
465 465 b = self.map.keys()
466 466 b.sort()
467 467 blen = len(b)
468 468
469 469 for x in unknown:
470 470 bs = bisect.bisect(b, x)
471 471 if bs != 0 and b[bs-1] == x:
472 472 ret[x] = self.map[x]
473 473 continue
474 474 while bs < blen:
475 475 s = b[bs]
476 476 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
477 477 ret[s] = self.map[s]
478 478 else:
479 479 break
480 480 bs += 1
481 481 return ret
482 482
483 483 def walk(self, files = None, match = util.always, dc=None):
484 484 self.read()
485 485
486 486 # walk all files by default
487 487 if not files:
488 488 files = [self.root]
489 489 if not dc:
490 490 dc = self.map.copy()
491 491 elif not dc:
492 492 dc = self.filterfiles(files)
493 493
494 494 known = {'.hg': 1}
495 495 def seen(fn):
496 496 if fn in known: return True
497 497 known[fn] = 1
498 498 def traverse():
499 499 for ff in util.unique(files):
500 500 f = os.path.join(self.root, ff)
501 501 try:
502 502 st = os.stat(f)
503 503 except OSError, inst:
504 504 if ff not in dc: self.ui.warn('%s: %s\n' % (
505 505 util.pathto(self.getcwd(), ff),
506 506 inst.strerror))
507 507 continue
508 508 if stat.S_ISDIR(st.st_mode):
509 509 for dir, subdirs, fl in os.walk(f):
510 510 d = dir[len(self.root) + 1:]
511 511 nd = util.normpath(d)
512 512 if nd == '.': nd = ''
513 513 if seen(nd):
514 514 subdirs[:] = []
515 515 continue
516 516 for sd in subdirs:
517 517 ds = os.path.join(nd, sd +'/')
518 518 if self.ignore(ds) or not match(ds):
519 519 subdirs.remove(sd)
520 520 subdirs.sort()
521 521 fl.sort()
522 522 for fn in fl:
523 523 fn = util.pconvert(os.path.join(d, fn))
524 524 yield 'f', fn
525 525 elif stat.S_ISREG(st.st_mode):
526 526 yield 'f', ff
527 527 else:
528 528 kind = 'unknown'
529 529 if stat.S_ISCHR(st.st_mode): kind = 'character device'
530 530 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
531 531 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
532 532 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
533 533 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
534 534 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
535 535 util.pathto(self.getcwd(), ff),
536 536 kind))
537 537
538 538 ks = dc.keys()
539 539 ks.sort()
540 540 for k in ks:
541 541 yield 'm', k
542 542
543 543 # yield only files that match: all in dirstate, others only if
544 544 # not in .hgignore
545 545
546 546 for src, fn in util.unique(traverse()):
547 547 fn = util.normpath(fn)
548 548 if seen(fn): continue
549 549 if fn not in dc and self.ignore(fn):
550 550 continue
551 551 if match(fn):
552 552 yield src, fn
553 553
554 554 def changes(self, files=None, match=util.always):
555 555 self.read()
556 556 if not files:
557 557 dc = self.map.copy()
558 558 else:
559 559 dc = self.filterfiles(files)
560 560 lookup, modified, added, unknown = [], [], [], []
561 561 removed, deleted = [], []
562 562
563 563 for src, fn in self.walk(files, match, dc=dc):
564 564 try:
565 565 s = os.stat(os.path.join(self.root, fn))
566 566 except OSError:
567 567 continue
568 568 if not stat.S_ISREG(s.st_mode):
569 569 continue
570 570 c = dc.get(fn)
571 571 if c:
572 572 del dc[fn]
573 573 if c[0] == 'm':
574 574 modified.append(fn)
575 575 elif c[0] == 'a':
576 576 added.append(fn)
577 577 elif c[0] == 'r':
578 578 unknown.append(fn)
579 579 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
580 580 modified.append(fn)
581 581 elif c[3] != s.st_mtime:
582 582 lookup.append(fn)
583 583 else:
584 584 unknown.append(fn)
585 585
586 586 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
587 587 if c[0] == 'r':
588 588 removed.append(fn)
589 589 else:
590 590 deleted.append(fn)
591 591 return (lookup, modified, added, removed + deleted, unknown)
592 592
# used to avoid circular references so destructors work
def opener(base):
    """Return a function that opens paths relative to base.

    An http:// base yields read-only range readers; a local base
    yields ordinary (binary-mode) file objects, creating parent
    directories on write and breaking hard links first so the store
    behaves copy-on-write.
    """
    p = base
    def o(path, mode="r"):
        if p.startswith("http://"):
            f = os.path.join(p, urllib.quote(path))
            return httprangereader.httprangereader(f)

        f = os.path.join(p, path)

        mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                s = os.stat(f)
            except OSError:
                # new file: make sure its directory exists
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    os.makedirs(d)
            else:
                if s.st_nlink > 1:
                    # break the hard link (e.g. from a local clone) by
                    # copying through a temp file, so writes here don't
                    # corrupt other repositories sharing the inode
                    file(f + ".tmp", "wb").write(file(f, "rb").read())
                    util.rename(f+".tmp", f)

        return file(f, mode)

    return o
620 620
class RepoError(Exception):
    """Raised when a repository cannot be located or opened."""
    pass
622 622
623 623 class localrepository:
    def __init__(self, ui, path=None, create=0):
        # remote (http) repositories are read-only proxies; anything
        # else is an on-disk repository rooted at path/.hg
        self.remote = 0
        if path and path.startswith("http://"):
            self.remote = 1
            self.path = path
        else:
            if not path:
                # no path given: search upward from cwd for a .hg dir
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

            if not create and not os.path.isdir(self.path):
                raise RepoError("repository %s not found" % self.path)

        self.root = os.path.abspath(path)
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.opener = opener(self.path)   # opens files inside .hg
        self.wopener = opener(self.root)  # opens working-directory files
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        self.tagscache = None      # filled lazily by tags()
        self.nodetagscache = None  # filled lazily by nodetags()

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            try:
                # per-repository configuration is optional
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass
661 661
662 662 def hook(self, name, **args):
663 663 s = self.ui.config("hooks", name)
664 664 if s:
665 665 self.ui.note("running hook %s: %s\n" % (name, s))
666 666 old = {}
667 667 for k, v in args.items():
668 668 k = k.upper()
669 669 old[k] = os.environ.get(k, None)
670 670 os.environ[k] = v
671 671
672 672 r = os.system(s)
673 673
674 674 for k, v in old.items():
675 675 if v != None:
676 676 os.environ[k] = v
677 677 else:
678 678 del os.environ[k]
679 679
680 680 if r:
681 681 self.ui.warn("abort: %s hook failed with status %d!\n" %
682 682 (name, r))
683 683 return False
684 684 return True
685 685
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # record tag k -> binary node, tolerating bad hex hashes
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file exists yet
                pass

            try:
                # local (uncommitted) tags override those from .hgtags
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
723 723
724 724 def tagslist(self):
725 725 '''return a list of tags ordered by revision'''
726 726 l = []
727 727 for t, n in self.tags().items():
728 728 try:
729 729 r = self.changelog.rev(n)
730 730 except:
731 731 r = -2 # sort to the beginning of the list if unknown
732 732 l.append((r,t,n))
733 733 l.sort()
734 734 return [(t,n) for r,t,n in l]
735 735
736 736 def nodetags(self, node):
737 737 '''return the tags associated with a node'''
738 738 if not self.nodetagscache:
739 739 self.nodetagscache = {}
740 740 for t,n in self.tags().items():
741 741 self.nodetagscache.setdefault(n,[]).append(t)
742 742 return self.nodetagscache.get(node, [])
743 743
744 744 def lookup(self, key):
745 745 try:
746 746 return self.tags()[key]
747 747 except KeyError:
748 748 try:
749 749 return self.changelog.lookup(key)
750 750 except:
751 751 raise RepoError("unknown revision '%s'" % key)
752 752
753 753 def dev(self):
754 754 if self.remote: return -1
755 755 return os.stat(self.path).st_dev
756 756
757 757 def local(self):
758 758 return not self.remote
759 759
760 760 def join(self, f):
761 761 return os.path.join(self.path, f)
762 762
763 763 def wjoin(self, f):
764 764 return os.path.join(self.root, f)
765 765
766 766 def file(self, f):
767 767 if f[0] == '/': f = f[1:]
768 768 return filelog(self.opener, f)
769 769
770 770 def getcwd(self):
771 771 return self.dirstate.getcwd()
772 772
773 773 def wfile(self, f, mode='r'):
774 774 return self.wopener(f, mode)
775 775
776 776 def wread(self, filename):
777 777 return self.wopener(filename, 'r').read()
778 778
779 779 def wwrite(self, filename, data, fd=None):
780 780 if fd:
781 781 return fd.write(data)
782 782 return self.wopener(filename, 'w').write(data)
783 783
    def transaction(self):
        """Open a new transaction, snapshotting the dirstate for undo."""
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on successful close, keep the journal around as "undo"
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
799 799
800 800 def recover(self):
801 801 lock = self.lock()
802 802 if os.path.exists(self.join("journal")):
803 803 self.ui.status("rolling back interrupted transaction\n")
804 804 return transaction.rollback(self.opener, self.join("journal"))
805 805 else:
806 806 self.ui.warn("no interrupted transaction available\n")
807 807
808 808 def undo(self):
809 809 lock = self.lock()
810 810 if os.path.exists(self.join("undo")):
811 811 self.ui.status("rolling back last transaction\n")
812 812 transaction.rollback(self.opener, self.join("undo"))
813 813 self.dirstate = None
814 814 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
815 815 self.dirstate = dirstate(self.opener, self.ui, self.root)
816 816 else:
817 817 self.ui.warn("no undo information available\n")
818 818
    def lock(self, wait = 1):
        """Acquire the repository lock; if held and wait is true, block."""
        try:
            # first try a non-blocking acquire
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
827 827
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit the given files with explicit parents/user/date.

        Used by import-style operations: parents may differ from the
        dirstate parents, in which case the dirstate is left untouched.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only track the dirstate if we're committing on top of the
        # current working-directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file unreadable/missing: treat as a removal
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
897 897
898 898 def commit(self, files = None, text = "", user = None, date = None,
899 899 match = util.always, force=False):
900 900 commit = []
901 901 remove = []
902 902 changed = []
903 903
904 904 if files:
905 905 for f in files:
906 906 s = self.dirstate.state(f)
907 907 if s in 'nmai':
908 908 commit.append(f)
909 909 elif s == 'r':
910 910 remove.append(f)
911 911 else:
912 912 self.ui.warn("%s not tracked!\n" % f)
913 913 else:
914 914 (c, a, d, u) = self.changes(match = match)
915 915 commit = c + a
916 916 remove = d
917 917
918 918 p1, p2 = self.dirstate.parents()
919 919 c1 = self.changelog.read(p1)
920 920 c2 = self.changelog.read(p2)
921 921 m1 = self.manifest.read(c1[0])
922 922 mf1 = self.manifest.readflags(c1[0])
923 923 m2 = self.manifest.read(c2[0])
924 924
925 925 if not commit and not remove and not force and p2 == nullid:
926 926 self.ui.status("nothing changed\n")
927 927 return None
928 928
929 929 if not self.hook("precommit"):
930 930 return None
931 931
932 932 lock = self.lock()
933 933 tr = self.transaction()
934 934
935 935 # check in files
936 936 new = {}
937 937 linkrev = self.changelog.count()
938 938 commit.sort()
939 939 for f in commit:
940 940 self.ui.note(f + "\n")
941 941 try:
942 942 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
943 943 t = self.wread(f)
944 944 except IOError:
945 945 self.ui.warn("trouble committing %s!\n" % f)
946 946 raise
947 947
948 948 meta = {}
949 949 cp = self.dirstate.copied(f)
950 950 if cp:
951 951 meta["copy"] = cp
952 952 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
953 953 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
954 954
955 955 r = self.file(f)
956 956 fp1 = m1.get(f, nullid)
957 957 fp2 = m2.get(f, nullid)
958 958
959 959 # is the same revision on two branches of a merge?
960 960 if fp2 == fp1:
961 961 fp2 = nullid
962 962
963 963 if fp2 != nullid:
964 964 # is one parent an ancestor of the other?
965 965 fpa = r.ancestor(fp1, fp2)
966 966 if fpa == fp1:
967 967 fp1, fp2 = fp2, nullid
968 968 elif fpa == fp2:
969 969 fp2 = nullid
970 970
971 971 # is the file unmodified from the parent?
972 972 if not meta and t == r.read(fp1):
973 973 # record the proper existing parent in manifest
974 974 # no need to add a revision
975 975 new[f] = fp1
976 976 continue
977 977
978 978 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
979 979 # remember what we've added so that we can later calculate
980 980 # the files to pull from a set of changesets
981 981 changed.append(f)
982 982
983 983 # update manifest
984 984 m1.update(new)
985 985 for f in remove:
986 986 if f in m1:
987 987 del m1[f]
988 988 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
989 989 (new, remove))
990 990
991 991 # add changeset
992 992 new = new.keys()
993 993 new.sort()
994 994
995 995 if not text:
996 996 edittext = ""
997 997 if p2 != nullid:
998 998 edittext += "HG: branch merge\n"
999 999 edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
1000 1000 edittext += "".join(["HG: changed %s\n" % f for f in changed])
1001 1001 edittext += "".join(["HG: removed %s\n" % f for f in remove])
1002 1002 if not changed and not remove:
1003 1003 edittext += "HG: no files changed\n"
1004 1004 edittext = self.ui.edit(edittext)
1005 1005 if not edittext.rstrip():
1006 1006 return None
1007 1007 text = edittext
1008 1008
1009 1009 user = user or self.ui.username()
1010 1010 n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
1011 1011 tr.close()
1012 1012
1013 1013 self.dirstate.setparents(n)
1014 1014 self.dirstate.update(new, "n")
1015 1015 self.dirstate.forget(remove)
1016 1016
1017 1017 if not self.hook("commit", node=hex(n)):
1018 1018 return None
1019 1019 return n
1020 1020
1021 1021 def walk(self, node = None, files = [], match = util.always):
1022 1022 if node:
1023 1023 for fn in self.manifest.read(self.changelog.read(node)[0]):
1024 1024 if match(fn): yield 'm', fn
1025 1025 else:
1026 1026 for src, fn in self.dirstate.walk(files, match):
1027 1027 yield src, fn
1028 1028
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Return (changed, added, deleted, unknown) between revisions.

        With no node2, compares against the working directory; with
        neither node, compares the working directory to its first
        parent.  'unknown' is only populated for working-dir compares.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working copy of fn against its stored revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            # lookup/changed/added files all count as present
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    # "" means working-dir entry: fall back to a content
                    # compare against the node1 revision
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever is left in mf1 was deleted
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
1099 1099
1100 1100 def add(self, list):
1101 1101 for f in list:
1102 1102 p = self.wjoin(f)
1103 1103 if not os.path.exists(p):
1104 1104 self.ui.warn("%s does not exist!\n" % f)
1105 1105 elif not os.path.isfile(p):
1106 1106 self.ui.warn("%s not added: only files supported currently\n" % f)
1107 1107 elif self.dirstate.state(f) in 'an':
1108 1108 self.ui.warn("%s already tracked!\n" % f)
1109 1109 else:
1110 1110 self.dirstate.update([f], "a")
1111 1111
1112 1112 def forget(self, list):
1113 1113 for f in list:
1114 1114 if self.dirstate.state(f) not in 'ai':
1115 1115 self.ui.warn("%s not added!\n" % f)
1116 1116 else:
1117 1117 self.dirstate.forget([f])
1118 1118
1119 1119 def remove(self, list):
1120 1120 for f in list:
1121 1121 p = self.wjoin(f)
1122 1122 if os.path.exists(p):
1123 1123 self.ui.warn("%s still exists!\n" % f)
1124 1124 elif self.dirstate.state(f) == 'a':
1125 1125 self.ui.warn("%s never committed!\n" % f)
1126 1126 self.dirstate.forget([f])
1127 1127 elif f not in self.dirstate:
1128 1128 self.ui.warn("%s not tracked!\n" % f)
1129 1129 else:
1130 1130 self.dirstate.update([f], "r")
1131 1131
1132 1132 def copy(self, source, dest):
1133 1133 p = self.wjoin(dest)
1134 1134 if not os.path.exists(p):
1135 1135 self.ui.warn("%s does not exist!\n" % dest)
1136 1136 elif not os.path.isfile(p):
1137 1137 self.ui.warn("copy failed: %s is not a file\n" % dest)
1138 1138 else:
1139 1139 if self.dirstate.state(dest) == '?':
1140 1140 self.dirstate.update([dest], "a")
1141 1141 self.dirstate.copy(source, dest)
1142 1142
1143 1143 def heads(self):
1144 1144 return self.changelog.heads()
1145 1145
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph: a->b->c->d->e
    # \ /
    # aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags visible from it.

        See the block comment above for the elimination rules; branch,
        when given, stops the ancestor walk once that tag is reached.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume at a merge's second parent, inheriting the
                # tags already found on the path that reached it
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # tag/head on the path so far, and from itself
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of the branches dict from node,
                # memoized in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1251 1251
1252 1252 def branches(self, nodes):
1253 1253 if not nodes: nodes = [self.changelog.tip()]
1254 1254 b = []
1255 1255 for n in nodes:
1256 1256 t = n
1257 1257 while n:
1258 1258 p = self.changelog.parents(n)
1259 1259 if p[1] != nullid or p[0] == nullid:
1260 1260 b.append((t, n, p[0], p[1]))
1261 1261 break
1262 1262 n = p[0]
1263 1263 return b
1264 1264
1265 1265 def between(self, pairs):
1266 1266 r = []
1267 1267
1268 1268 for top, bottom in pairs:
1269 1269 n, l, i = top, [], 0
1270 1270 f = 1
1271 1271
1272 1272 while n != bottom:
1273 1273 p = self.changelog.parents(n)[0]
1274 1274 if i == f:
1275 1275 l.append(n)
1276 1276 f = f * 2
1277 1277 n = p
1278 1278 i += 1
1279 1279
1280 1280 r.append(l)
1281 1281
1282 1282 return r
1283 1283
1284 1284 def newer(self, nodes):
1285 1285 m = {}
1286 1286 nl = []
1287 1287 pm = {}
1288 1288 cl = self.changelog
1289 1289 t = l = cl.count()
1290 1290
1291 1291 # find the lowest numbered node
1292 1292 for n in nodes:
1293 1293 l = min(l, cl.rev(n))
1294 1294 m[n] = 1
1295 1295
1296 1296 for i in xrange(l, t):
1297 1297 n = cl.node(i)
1298 1298 if n in m: # explicitly listed
1299 1299 pm[n] = 1
1300 1300 nl.append(n)
1301 1301 continue
1302 1302 for p in cl.parents(n):
1303 1303 if p in pm: # parent listed
1304 1304 pm[n] = 1
1305 1305 nl.append(n)
1306 1306 break
1307 1307
1308 1308 return nl
1309 1309
    def findincoming(self, remote, base=None, heads=None):
        """Find changesets the remote repository has that we lack.

        Walks remote branches (linear history segments) from the
        remote heads toward locally known nodes, then binary-searches
        partially known segments via remote.between to pinpoint the
        earliest unknown changesets.

        base, when given, is filled in with nodes known on both sides.
        Returns the list of earliest-unknown nodes to fetch, or None
        when the remote has nothing new.
        """
        m = self.changelog.nodemap
        search = []
        fetch = []
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status("searching for changes\n")

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is already known locally
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # both parents known: the segment root is
                            # the earliest unknown changeset here
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.append(n[1]) # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue unknown parents for the next batch of
                    # remote.branches requests
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                            (reqcnt, " ".join(map(short, r))))
                # batch requests ten heads at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        if b[0] not in m and b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most two: p is the first unknown
                        self.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch.append(p)
                        base[i] = 1
                    else:
                        self.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise RepoError("already have changeset " + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn("warning: pulling from an unrelated repository!\n")

        self.ui.note("adding new changesets starting at " +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return fetch
1426 1426
1427 1427 def findoutgoing(self, remote, base=None, heads=None):
1428 1428 if base == None:
1429 1429 base = {}
1430 1430 self.findincoming(remote, base, heads)
1431 1431
1432 1432 remain = dict.fromkeys(self.changelog.nodemap)
1433 1433
1434 1434 # prune everything remote has from the tree
1435 1435 del remain[nullid]
1436 1436 remove = base.keys()
1437 1437 while remove:
1438 1438 n = remove.pop(0)
1439 1439 if n in remain:
1440 1440 del remain[n]
1441 1441 for p in self.changelog.parents(n):
1442 1442 remove.append(p)
1443 1443
1444 1444 # find every node whose parents have been pruned
1445 1445 subset = []
1446 1446 for n in remain:
1447 1447 p1, p2 = self.changelog.parents(n)
1448 1448 if p1 not in remain and p2 not in remain:
1449 1449 subset.append(n)
1450 1450
1451 1451 # this is the set of all roots we have to push
1452 1452 return subset
1453 1453
1454 1454 def pull(self, remote):
1455 1455 lock = self.lock()
1456 1456
1457 1457 # if we have an empty repo, fetch everything
1458 1458 if self.changelog.tip() == nullid:
1459 1459 self.ui.status("requesting all changes\n")
1460 1460 fetch = [nullid]
1461 1461 else:
1462 1462 fetch = self.findincoming(remote)
1463 1463
1464 1464 if not fetch:
1465 1465 self.ui.status("no changes found\n")
1466 1466 return 1
1467 1467
1468 1468 cg = remote.changegroup(fetch)
1469 1469 return self.addchangegroup(cg)
1470 1470
1471 1471 def push(self, remote, force=False):
1472 1472 lock = remote.lock()
1473 1473
1474 1474 base = {}
1475 1475 heads = remote.heads()
1476 1476 inc = self.findincoming(remote, base, heads)
1477 1477 if not force and inc:
1478 1478 self.ui.warn("abort: unsynced remote changes!\n")
1479 1479 self.ui.status("(did you forget to sync? use push -f to force)\n")
1480 1480 return 1
1481 1481
1482 1482 update = self.findoutgoing(remote, base)
1483 1483 if not update:
1484 1484 self.ui.status("no changes found\n")
1485 1485 return 1
1486 1486 elif not force:
1487 1487 if len(heads) < len(self.changelog.heads()):
1488 1488 self.ui.warn("abort: push creates new remote branches!\n")
1489 1489 self.ui.status("(did you forget to merge?" +
1490 1490 " use push -f to force)\n")
1491 1491 return 1
1492 1492
1493 1493 cg = self.changegroup(update)
1494 1494 return remote.addchangegroup(cg)
1495 1495
    def changegroup(self, basenodes):
        """Build a changegroup stream for all changesets descending
        from (or in) basenodes.

        Stream layout: changelog group, manifest group, then for each
        changed file a length-prefixed filename followed by its
        revision group, terminated by a zero-length chunk.  Returns a
        file-like object exposing read(l) over the generator.
        """
        class genread:
            # adapt a string generator to a read(l) interface
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def fillbuf(self):
                # drain the whole generator into the buffer
                self.buf += "".join(self.g)

            def read(self, l):
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            nodes = self.newer(basenodes)

            # construct the link map
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            for f in changed:
                # filename chunk: 4-byte length prefix includes itself
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # zero-length chunk ends the stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
1544 1544
1545 1545 def addchangegroup(self, source):
1546 1546
1547 1547 def getchunk():
1548 1548 d = source.read(4)
1549 1549 if not d: return ""
1550 1550 l = struct.unpack(">l", d)[0]
1551 1551 if l <= 4: return ""
1552 1552 return source.read(l - 4)
1553 1553
1554 1554 def getgroup():
1555 1555 while 1:
1556 1556 c = getchunk()
1557 1557 if not c: break
1558 1558 yield c
1559 1559
1560 1560 def csmap(x):
1561 1561 self.ui.debug("add changeset %s\n" % short(x))
1562 1562 return self.changelog.count()
1563 1563
1564 1564 def revmap(x):
1565 1565 return self.changelog.rev(x)
1566 1566
1567 1567 if not source: return
1568 1568 changesets = files = revisions = 0
1569 1569
1570 1570 tr = self.transaction()
1571 1571
1572 oldheads = len(self.changelog.heads())
1573
1572 1574 # pull off the changeset group
1573 1575 self.ui.status("adding changesets\n")
1574 1576 co = self.changelog.tip()
1575 1577 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1576 1578 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1577 1579
1578 1580 # pull off the manifest group
1579 1581 self.ui.status("adding manifests\n")
1580 1582 mm = self.manifest.tip()
1581 1583 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1582 1584
1583 1585 # process the files
1584 1586 self.ui.status("adding file changes\n")
1585 1587 while 1:
1586 1588 f = getchunk()
1587 1589 if not f: break
1588 1590 self.ui.debug("adding %s revisions\n" % f)
1589 1591 fl = self.file(f)
1590 1592 o = fl.count()
1591 1593 n = fl.addgroup(getgroup(), revmap, tr)
1592 1594 revisions += fl.count() - o
1593 1595 files += 1
1594 1596
1597 newheads = len(self.changelog.heads())
1598 heads = ""
1599 if oldheads and newheads > oldheads:
1600 heads = " (+%d heads)" % (newheads - oldheads)
1601
1595 1602 self.ui.status(("added %d changesets" +
1596 " with %d changes to %d files\n")
1597 % (changesets, revisions, files))
1603 " with %d changes to %d files%s\n")
1604 % (changesets, revisions, files, heads))
1598 1605
1599 1606 tr.close()
1600 1607
1601 1608 if not self.hook("changegroup"):
1602 1609 return 1
1603 1610
1604 1611 return
1605 1612
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to the changeset node.

        allow permits branch-crossing updates (merges); force discards
        conflicting local changes; choose optionally filters the files
        considered; moddirstate=False performs the file operations
        without recording them in the dirstate.

        Returns 1 on refusal (outstanding uncommitted merge, or a
        branch-crossing update without allow/force), otherwise None.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)        # local manifest
        mf1 = self.manifest.readflags(m1n)  # local exec flags
        m2 = self.manifest.read(m2n)        # remote (target) manifest
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)        # ancestor manifest
        mfa = self.manifest.readflags(man)

        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge
        get = {}     # files to take from the target revision
        remove = []  # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    # locally changed, remotely deleted: ask (or keep)
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file is not in the target and not in the ancestor
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # files only present in the target manifest (m2 entries that
        # matched the working dir were deleted above)
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                # remotely changed, locally deleted: ask (or keep)
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the target version of everything
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError:
                # missing parent directory; create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
1839 1846
1840 1847 def merge3(self, fn, my, other):
1841 1848 """perform a 3-way merge in the working directory"""
1842 1849
1843 1850 def temp(prefix, node):
1844 1851 pre = "%s~%s." % (os.path.basename(fn), prefix)
1845 1852 (fd, name) = tempfile.mkstemp("", pre)
1846 1853 f = os.fdopen(fd, "wb")
1847 1854 self.wwrite(fn, fl.read(node), f)
1848 1855 f.close()
1849 1856 return name
1850 1857
1851 1858 fl = self.file(fn)
1852 1859 base = fl.ancestor(my, other)
1853 1860 a = self.wjoin(fn)
1854 1861 b = temp("base", base)
1855 1862 c = temp("other", other)
1856 1863
1857 1864 self.ui.note("resolving %s\n" % fn)
1858 1865 self.ui.debug("file %s: other %s ancestor %s\n" %
1859 1866 (fn, short(other), short(base)))
1860 1867
1861 1868 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1862 1869 or "hgmerge")
1863 1870 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1864 1871 if r:
1865 1872 self.ui.warn("merging %s failed!\n" % fn)
1866 1873
1867 1874 os.unlink(b)
1868 1875 os.unlink(c)
1869 1876
1870 1877 def verify(self):
1871 1878 filelinkrevs = {}
1872 1879 filenodes = {}
1873 1880 changesets = revisions = files = 0
1874 1881 errors = 0
1875 1882
1876 1883 seen = {}
1877 1884 self.ui.status("checking changesets\n")
1878 1885 for i in range(self.changelog.count()):
1879 1886 changesets += 1
1880 1887 n = self.changelog.node(i)
1881 1888 if n in seen:
1882 1889 self.ui.warn("duplicate changeset at revision %d\n" % i)
1883 1890 errors += 1
1884 1891 seen[n] = 1
1885 1892
1886 1893 for p in self.changelog.parents(n):
1887 1894 if p not in self.changelog.nodemap:
1888 1895 self.ui.warn("changeset %s has unknown parent %s\n" %
1889 1896 (short(n), short(p)))
1890 1897 errors += 1
1891 1898 try:
1892 1899 changes = self.changelog.read(n)
1893 1900 except Exception, inst:
1894 1901 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1895 1902 errors += 1
1896 1903
1897 1904 for f in changes[3]:
1898 1905 filelinkrevs.setdefault(f, []).append(i)
1899 1906
1900 1907 seen = {}
1901 1908 self.ui.status("checking manifests\n")
1902 1909 for i in range(self.manifest.count()):
1903 1910 n = self.manifest.node(i)
1904 1911 if n in seen:
1905 1912 self.ui.warn("duplicate manifest at revision %d\n" % i)
1906 1913 errors += 1
1907 1914 seen[n] = 1
1908 1915
1909 1916 for p in self.manifest.parents(n):
1910 1917 if p not in self.manifest.nodemap:
1911 1918 self.ui.warn("manifest %s has unknown parent %s\n" %
1912 1919 (short(n), short(p)))
1913 1920 errors += 1
1914 1921
1915 1922 try:
1916 1923 delta = mdiff.patchtext(self.manifest.delta(n))
1917 1924 except KeyboardInterrupt:
1918 1925 self.ui.warn("aborted")
1919 1926 sys.exit(0)
1920 1927 except Exception, inst:
1921 1928 self.ui.warn("unpacking manifest %s: %s\n"
1922 1929 % (short(n), inst))
1923 1930 errors += 1
1924 1931
1925 1932 ff = [ l.split('\0') for l in delta.splitlines() ]
1926 1933 for f, fn in ff:
1927 1934 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1928 1935
1929 1936 self.ui.status("crosschecking files in changesets and manifests\n")
1930 1937 for f in filenodes:
1931 1938 if f not in filelinkrevs:
1932 1939 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1933 1940 errors += 1
1934 1941
1935 1942 for f in filelinkrevs:
1936 1943 if f not in filenodes:
1937 1944 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1938 1945 errors += 1
1939 1946
1940 1947 self.ui.status("checking files\n")
1941 1948 ff = filenodes.keys()
1942 1949 ff.sort()
1943 1950 for f in ff:
1944 1951 if f == "/dev/null": continue
1945 1952 files += 1
1946 1953 fl = self.file(f)
1947 1954 nodes = { nullid: 1 }
1948 1955 seen = {}
1949 1956 for i in range(fl.count()):
1950 1957 revisions += 1
1951 1958 n = fl.node(i)
1952 1959
1953 1960 if n in seen:
1954 1961 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1955 1962 errors += 1
1956 1963
1957 1964 if n not in filenodes[f]:
1958 1965 self.ui.warn("%s: %d:%s not in manifests\n"
1959 1966 % (f, i, short(n)))
1960 1967 errors += 1
1961 1968 else:
1962 1969 del filenodes[f][n]
1963 1970
1964 1971 flr = fl.linkrev(n)
1965 1972 if flr not in filelinkrevs[f]:
1966 1973 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1967 1974 % (f, short(n), fl.linkrev(n)))
1968 1975 errors += 1
1969 1976 else:
1970 1977 filelinkrevs[f].remove(flr)
1971 1978
1972 1979 # verify contents
1973 1980 try:
1974 1981 t = fl.read(n)
1975 1982 except Exception, inst:
1976 1983 self.ui.warn("unpacking file %s %s: %s\n"
1977 1984 % (f, short(n), inst))
1978 1985 errors += 1
1979 1986
1980 1987 # verify parents
1981 1988 (p1, p2) = fl.parents(n)
1982 1989 if p1 not in nodes:
1983 1990 self.ui.warn("file %s:%s unknown parent 1 %s" %
1984 1991 (f, short(n), short(p1)))
1985 1992 errors += 1
1986 1993 if p2 not in nodes:
1987 1994 self.ui.warn("file %s:%s unknown parent 2 %s" %
1988 1995 (f, short(n), short(p1)))
1989 1996 errors += 1
1990 1997 nodes[n] = 1
1991 1998
1992 1999 # cross-check
1993 2000 for node in filenodes[f]:
1994 2001 self.ui.warn("node %s in manifests not in %s\n"
1995 2002 % (hex(node), f))
1996 2003 errors += 1
1997 2004
1998 2005 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1999 2006 (files, changesets, revisions))
2000 2007
2001 2008 if errors:
2002 2009 self.ui.warn("%d integrity errors encountered!\n" % errors)
2003 2010 return 1
2004 2011
class remoterepository:
    """Base class for repositories accessed over the wire."""
    def local(self):
        # remote repositories have no local working directory
        return False
2008 2015
2009 2016 class httprepository(remoterepository):
    def __init__(self, ui, path):
        """Open a repository served over HTTP at the given URL.

        Normalizes the URL, reads proxy settings from the [http_proxy]
        hgrc section and the http_proxy/no_proxy environment variables,
        and installs a process-wide urllib2 opener configured with the
        resulting proxy and authentication handlers.
        """
        # fix missing / after hostname
        s = urlparse.urlsplit(path)
        partial = s[2]
        if not partial: partial = "/"
        self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
        self.ui = ui
        # hosts that must never be reached through the proxy
        no_list = [ "localhost", "127.0.0.1" ]
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those will
        # take precedence
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if os.environ.has_key(env):
                    del os.environ[env]
            except OSError:
                pass

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            # the proxy requires basic authentication
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        opener = urllib2.build_opener(proxy_handler, authinfo)
        # NOTE(review): installs a global opener, affecting all urllib2
        # users in the process, not just this repository instance
        urllib2.install_opener(opener)
2059 2066
2060 2067 def dev(self):
2061 2068 return -1
2062 2069
2063 2070 def do_cmd(self, cmd, **args):
2064 2071 self.ui.debug("sending %s command\n" % cmd)
2065 2072 q = {"cmd": cmd}
2066 2073 q.update(args)
2067 2074 qs = urllib.urlencode(q)
2068 2075 cu = "%s?%s" % (self.url, qs)
2069 2076 resp = urllib2.urlopen(cu)
2070 2077 proto = resp.headers['content-type']
2071 2078
2072 2079 # accept old "text/plain" and "application/hg-changegroup" for now
2073 2080 if not proto.startswith('application/mercurial') and \
2074 2081 not proto.startswith('text/plain') and \
2075 2082 not proto.startswith('application/hg-changegroup'):
2076 2083 raise RepoError("'%s' does not appear to be an hg repository"
2077 2084 % self.url)
2078 2085
2079 2086 if proto.startswith('application/mercurial'):
2080 2087 version = proto[22:]
2081 2088 if float(version) > 0.1:
2082 2089 raise RepoError("'%s' uses newer protocol %s" %
2083 2090 (self.url, version))
2084 2091
2085 2092 return resp
2086 2093
2087 2094 def heads(self):
2088 2095 d = self.do_cmd("heads").read()
2089 2096 try:
2090 2097 return map(bin, d[:-1].split(" "))
2091 2098 except:
2092 2099 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2093 2100 raise
2094 2101
2095 2102 def branches(self, nodes):
2096 2103 n = " ".join(map(hex, nodes))
2097 2104 d = self.do_cmd("branches", nodes=n).read()
2098 2105 try:
2099 2106 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2100 2107 return br
2101 2108 except:
2102 2109 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2103 2110 raise
2104 2111
2105 2112 def between(self, pairs):
2106 2113 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2107 2114 d = self.do_cmd("between", pairs=n).read()
2108 2115 try:
2109 2116 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2110 2117 return p
2111 2118 except:
2112 2119 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2113 2120 raise
2114 2121
2115 2122 def changegroup(self, nodes):
2116 2123 n = " ".join(map(hex, nodes))
2117 2124 f = self.do_cmd("changegroup", roots=n)
2118 2125 bytes = 0
2119 2126
2120 2127 class zread:
2121 2128 def __init__(self, f):
2122 2129 self.zd = zlib.decompressobj()
2123 2130 self.f = f
2124 2131 self.buf = ""
2125 2132 def read(self, l):
2126 2133 while l > len(self.buf):
2127 2134 r = self.f.read(4096)
2128 2135 if r:
2129 2136 self.buf += self.zd.decompress(r)
2130 2137 else:
2131 2138 self.buf += self.zd.flush()
2132 2139 break
2133 2140 d, self.buf = self.buf[:l], self.buf[l:]
2134 2141 return d
2135 2142
2136 2143 return zread(f)
2137 2144
class remotelock:
    """Handle for a lock held on a remote repository.

    The lock is dropped explicitly with release(), or implicitly when
    the handle is garbage collected.
    """

    def __init__(self, repo):
        self.repo = repo

    def release(self):
        # tell the remote side to drop the lock, then forget the repo so
        # a later __del__ does not try to release a second time
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        if self.repo is not None:
            self.release()
2147 2154
class sshrepository(remoterepository):
    """Repository reached by running 'hg serve --stdio' over ssh.

    All commands are multiplexed over the child ssh process's
    stdin/stdout; its stderr is forwarded to the user as "remote:"
    lines by readerr().
    """
    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        # ssh://[user@]host[:port]/path
        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))', path)
        if not m:
            raise RepoError("couldn't parse destination %s" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7)

        # assemble the "user@host" / "-p port" arguments for ssh
        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args
        path = self.path or ""

        if not path:
            raise RepoError("no remote repository path specified")

        # both the ssh binary and the remote hg command are configurable
        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")
        cmd = "%s %s '%s -R %s serve --stdio'"
        cmd = cmd % (sshcmd, args, remotecmd, path)

        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        """Drain any pending stderr output from the remote side."""
        while 1:
            # non-blocking poll: timeout of 0 returns immediately
            r, w, x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        # close the pipes, flushing any remaining remote error output;
        # swallow failures since interpreter shutdown may be under way
        try:
            self.pipeo.close()
            self.pipei.close()
            for l in self.pipee:
                self.ui.status("remote: ", l)
            self.pipee.close()
        except:
            pass

    def dev(self):
        # remote repositories have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Send *cmd* and its arguments; return the reply stream.

        Each argument goes over the wire as "name length\\n" followed by
        the raw value bytes.
        """
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run *cmd* and return its complete, length-prefixed reply."""
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        # the remote side holds the lock until unlock() / remotelock.__del__
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        """Return the list of the remote repository's head nodes."""
        d = self.call("heads")
        try:
            # reply is space-separated hex hashes with a trailing newline
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        """Query the remote 'branches' command for *nodes*.

        Each reply line is a space-separated run of hex node hashes,
        returned here as a tuple of binary nodes per line.
        """
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        """Query the remote 'between' command for each (a, b) pair.

        Returns one (possibly empty) list of binary nodes per pair.
        """
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        """Start a changegroup transfer; the caller reads it from the
        returned pipe."""
        n = " ".join(map(hex, nodes))
        self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """Push the changegroup stream *cg* to the remote repository.

        Raises RepoError if the remote refuses the push; otherwise
        streams the data and returns the truth value of the remote's
        final length-prefixed reply.
        """
        d = self.call("addchangegroup")
        if d:
            # fixed: interpolate the refusal reason into the message
            # (it was previously passed as a stray constructor argument)
            raise RepoError("push refused: %s" % d)

        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            # keep forwarding remote stderr while the push is in flight
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
2270 2277
class httpsrepository(httprepository):
    """Repository reached over https://; no behavior is added beyond
    what httprepository already provides."""
2273 2280
def repository(ui, path=None, create=0):
    """Return a repository object for *path*.

    The URL scheme selects the implementation: hg:// is an alias for
    http://, and old-http:// opens a localrepository over plain HTTP.
    With no recognized scheme (or no path at all), a localrepository is
    opened (or created when *create* is set).
    """
    if path:
        handlers = (
            ("http://", lambda p: httprepository(ui, p)),
            ("https://", lambda p: httpsrepository(ui, p)),
            ("hg://",
             lambda p: httprepository(ui, p.replace("hg://", "http://"))),
            ("old-http://",
             lambda p: localrepository(ui,
                                       p.replace("old-http://", "http://"))),
            ("ssh://", lambda p: sshrepository(ui, p)),
        )
        for prefix, open_repo in handlers:
            if path.startswith(prefix):
                return open_repo(path)

    return localrepository(ui, path, create)
General Comments 0
You need to be logged in to leave comments. Login now